-rw-r--r--  .mailmap | 4
-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 11
-rw-r--r--  Documentation/admin-guide/pm/amd-pstate.rst | 30
-rw-r--r--  Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/input/goodix,gt7375p.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml | 2
-rw-r--r--  Documentation/driver-api/miscellaneous.rst | 5
-rw-r--r--  Documentation/networking/generic_netlink.rst | 2
-rw-r--r--  Documentation/process/code-of-conduct-interpretation.rst | 2
-rw-r--r--  Documentation/translations/zh_CN/loongarch/introduction.rst | 4
-rw-r--r--  MAINTAINERS | 76
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am335x-pcm-953.dtsi | 28
-rw-r--r--  arch/arm/boot/dts/at91rm9200.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9g20ek_common.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/imx6q-prti6q.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx7s.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/lan966x-pcb8291.dts | 20
-rw-r--r--  arch/arm/boot/dts/rk3036-evb.dts | 3
-rw-r--r--  arch/arm/boot/dts/rk3066a-mk808.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3188-radxarock.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3188.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/rk3288-evb-act8846.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3288-evb.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/rk3288-firefly.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/rk3288-miqi.dts | 3
-rw-r--r--  arch/arm/boot/dts/rk3288-rock2-square.dts | 3
-rw-r--r--  arch/arm/boot/dts/rk3288-vmarc-som.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/rk3xxx.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/sama7g5-pinfunc.h | 2
-rw-r--r--  arch/arm/include/asm/perf_event.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h | 6
-rw-r--r--  arch/arm/include/asm/pgtable.h | 16
-rw-r--r--  arch/arm/mach-at91/pm_suspend.S | 7
-rw-r--r--  arch/arm/mach-mxs/mach-mxs.c | 4
-rw-r--r--  arch/arm/mm/nommu.c | 19
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts | 32
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-evk.dts | 6
-rw-r--r-- [-rwxr-xr-x]  arch/arm64/boot/dts/freescale/imx93-pinfunc.h | 0
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sa8155p-adp.dts | 13
-rw-r--r--  arch/arm64/boot/dts/qcom/sa8295p-adp.dts | 12
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7280.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp-crd.dts | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp.dtsi | 36
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8350-hdk.dts | 12
-rw-r--r--  arch/arm64/boot/dts/rockchip/px30-evb.dts | 10
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3308-evb.dts | 12
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368-r88.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts | 7
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts | 8
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts | 1
-rw-r--r--  arch/arm64/include/asm/efi.h | 8
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 4
-rw-r--r--  arch/arm64/kernel/efi-rt-wrapper.S | 33
-rw-r--r--  arch/arm64/kernel/efi.c | 26
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 2
-rw-r--r--  arch/arm64/mm/fault.c | 4
-rw-r--r--  arch/loongarch/Makefile | 2
-rw-r--r--  arch/loongarch/include/asm/irq.h | 2
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 17
-rw-r--r--  arch/loongarch/include/asm/smp.h | 30
-rw-r--r--  arch/loongarch/kernel/acpi.c | 31
-rw-r--r--  arch/loongarch/kernel/irq.c | 2
-rw-r--r--  arch/loongarch/kernel/process.c | 9
-rw-r--r--  arch/loongarch/kernel/setup.c | 1
-rw-r--r--  arch/loongarch/kernel/smp.c | 44
-rw-r--r--  arch/loongarch/kernel/unwind_prologue.c | 3
-rw-r--r--  arch/microblaze/Makefile | 4
-rw-r--r--  arch/mips/include/asm/pgtable.h | 1
-rw-r--r--  arch/nios2/boot/Makefile | 2
-rw-r--r--  arch/powerpc/include/asm/interrupt.h | 1
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 52
-rw-r--r--  arch/riscv/Kconfig | 6
-rw-r--r--  arch/riscv/include/asm/asm.h | 1
-rw-r--r--  arch/riscv/include/asm/efi.h | 6
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 11
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 1
-rw-r--r--  arch/riscv/include/asm/smp.h | 3
-rw-r--r--  arch/riscv/kernel/entry.S | 13
-rw-r--r--  arch/riscv/kernel/machine_kexec.c | 46
-rw-r--r--  arch/riscv/kernel/setup.c | 9
-rw-r--r--  arch/riscv/kernel/smp.c | 97
-rw-r--r--  arch/riscv/kernel/traps.c | 18
-rw-r--r--  arch/riscv/kernel/vdso/Makefile | 1
-rw-r--r--  arch/s390/include/asm/pgtable.h | 1
-rw-r--r--  arch/s390/include/asm/processor.h | 11
-rw-r--r--  arch/s390/kernel/crash_dump.c | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 1
-rw-r--r--  arch/x86/boot/Makefile | 2
-rw-r--r--  arch/x86/events/amd/core.c | 5
-rw-r--r--  arch/x86/events/amd/uncore.c | 1
-rw-r--r--  arch/x86/events/intel/pt.c | 9
-rw-r--r--  arch/x86/hyperv/hv_init.c | 54
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r--  arch/x86/include/asm/msr-index.h | 8
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable.h | 9
-rw-r--r--  arch/x86/include/asm/qspinlock_paravirt.h | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 6
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 21
-rw-r--r--  arch/x86/kernel/cpu/hygon.c | 4
-rw-r--r--  arch/x86/kernel/cpu/sgx/ioctl.c | 3
-rw-r--r--  arch/x86/kernel/cpu/tsx.c | 38
-rw-r--r--  arch/x86/kernel/fpu/core.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 13
-rw-r--r--  arch/x86/kvm/svm/nested.c | 12
-rw-r--r--  arch/x86/kvm/svm/svm.c | 26
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 4
-rw-r--r--  arch/x86/kvm/x86.c | 31
-rw-r--r--  arch/x86/kvm/xen.c | 32
-rw-r--r--  arch/x86/mm/ioremap.c | 8
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 13
-rw-r--r--  arch/x86/power/cpu.c | 22
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 3
-rw-r--r--  arch/x86/xen/setup.c | 3
-rw-r--r--  block/blk-cgroup.c | 2
-rw-r--r--  block/blk-core.c | 1
-rw-r--r--  block/blk-mq.c | 7
-rw-r--r--  block/blk-settings.c | 9
-rw-r--r--  block/blk.h | 1
-rw-r--r--  drivers/accessibility/speakup/main.c | 2
-rw-r--r--  drivers/accessibility/speakup/utils.h | 2
-rw-r--r--  drivers/acpi/numa/hmat.c | 27
-rw-r--r--  drivers/android/binder_alloc.c | 7
-rw-r--r--  drivers/block/drbd/drbd_main.c | 4
-rw-r--r--  drivers/block/ublk_drv.c | 82
-rw-r--r--  drivers/bus/intel-ixp4xx-eb.c | 9
-rw-r--r--  drivers/bus/sunxi-rsb.c | 38
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 5
-rw-r--r--  drivers/clk/at91/at91rm9200.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-sc8280xp.c | 6
-rw-r--r--  drivers/clk/qcom/gdsc.c | 61
-rw-r--r--  drivers/clk/qcom/gdsc.h | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 6
-rw-r--r--  drivers/clk/samsung/clk-exynos7885.c | 4
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 7
-rw-r--r--  drivers/clocksource/timer-riscv.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 2
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 49
-rw-r--r--  drivers/dax/hmem/device.c | 24
-rw-r--r--  drivers/dma-buf/dma-buf.c | 23
-rw-r--r--  drivers/dma-buf/dma-heap.c | 28
-rw-r--r--  drivers/extcon/extcon-usbc-tusb320.c | 8
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 37
-rw-r--r--  drivers/fpga/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 254
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 23
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 117
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 51
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 3
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 3
-rw-r--r--  drivers/gpu/drm/lima/lima_devfreq.c | 15
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 8
-rw-r--r--  drivers/gpu/host1x/dev.c | 4
-rw-r--r--  drivers/hv/channel_mgmt.c | 6
-rw-r--r--  drivers/hv/vmbus_drv.c | 1
-rw-r--r--  drivers/hwmon/asus-ec-sensors.c | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 9
-rw-r--r--  drivers/hwmon/i5500_temp.c | 2
-rw-r--r--  drivers/hwmon/ibmpex.c | 1
-rw-r--r--  drivers/hwmon/ina3221.c | 4
-rw-r--r--  drivers/hwmon/ltc2947-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-npcm7xx.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 1
-rw-r--r--  drivers/i2c/i2c-core-base.c | 9
-rw-r--r--  drivers/iio/accel/bma400_core.c | 28
-rw-r--r--  drivers/iio/adc/aspeed_adc.c | 11
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c | 6
-rw-r--r--  drivers/iio/adc/at91_adc.c | 4
-rw-r--r--  drivers/iio/adc/mp2629_adc.c | 5
-rw-r--r--  drivers/iio/health/afe4403.c | 5
-rw-r--r--  drivers/iio/health/afe4404.c | 12
-rw-r--r--  drivers/iio/imu/bno055/bno055.c | 2
-rw-r--r--  drivers/iio/industrialio-sw-trigger.c | 6
-rw-r--r--  drivers/iio/light/Kconfig | 2
-rw-r--r--  drivers/iio/light/apds9960.c | 12
-rw-r--r--  drivers/iio/pressure/ms5611.h | 12
-rw-r--r--  drivers/iio/pressure/ms5611_core.c | 51
-rw-r--r--  drivers/iio/pressure/ms5611_spi.c | 2
-rw-r--r--  drivers/iio/trigger/iio-trig-sysfs.c | 6
-rw-r--r--  drivers/input/joystick/iforce/iforce-main.c | 8
-rw-r--r--  drivers/input/misc/soc_button_array.c | 14
-rw-r--r--  drivers/input/mouse/synaptics.c | 1
-rw-r--r--  drivers/input/serio/i8042-acpipnpio.h | 8
-rw-r--r--  drivers/input/serio/i8042.c | 4
-rw-r--r--  drivers/input/touchscreen/goodix.c | 11
-rw-r--r--  drivers/input/touchscreen/raydium_i2c_ts.c | 4
-rw-r--r--  drivers/iommu/intel/dmar.c | 1
-rw-r--r--  drivers/iommu/intel/iommu.c | 81
-rw-r--r--  drivers/iommu/intel/iommu.h | 4
-rw-r--r--  drivers/iommu/intel/pasid.c | 5
-rw-r--r--  drivers/iommu/intel/svm.c | 19
-rw-r--r--  drivers/isdn/mISDN/core.c | 2
-rw-r--r--  drivers/isdn/mISDN/dsp_pipeline.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 1
-rw-r--r--  drivers/md/dm-integrity.c | 21
-rw-r--r--  drivers/md/dm-ioctl.c | 4
-rw-r--r--  drivers/md/dm-log-writes.c | 1
-rw-r--r--  drivers/media/common/videobuf2/frame_vector.c | 68
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 17
-rw-r--r--  drivers/mmc/core/mmc_test.c | 3
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-sprd.c | 4
-rw-r--r--  drivers/mmc/host/sdhci.c | 61
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mtd/nand/onenand/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 4
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 12
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 11
-rw-r--r--  drivers/net/bonding/bond_main.c | 17
-rw-r--r--  drivers/net/can/can327.c | 4
-rw-r--r--  drivers/net/can/cc770/cc770_isa.c | 10
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 9
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 10
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 5
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 10
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 6
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 34
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.c | 4
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 57
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 167
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router_hw.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 17
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 19
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c | 14
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_tc.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 29
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 15
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 1
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/loopback.c | 2
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/macvlan.c | 4
-rw-r--r--  drivers/net/mctp/mctp-i2c.c | 47
-rw-r--r--  drivers/net/mdio/fwnode_mdio.c | 2
-rw-r--r--  drivers/net/mhi_net.c | 2
-rw-r--r--  drivers/net/netdevsim/dev.c | 1
-rw-r--r--  drivers/net/ntb_netdev.c | 9
-rw-r--r--  drivers/net/phy/at803x.c | 4
-rw-r--r--  drivers/net/phy/dp83867.c | 7
-rw-r--r--  drivers/net/phy/marvell.c | 16
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/phy/phylink.c | 22
-rw-r--r--  drivers/net/thunderbolt.c | 19
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 1
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 46
-rw-r--r--  drivers/net/virtio_net.c | 3
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 39
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/hif.c | 21
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 26
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol.h | 2
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.c | 2
-rw-r--r--  drivers/nfc/nfcmrvl/i2c.c | 4
-rw-r--r--  drivers/nfc/nxp-nci/core.c | 8
-rw-r--r--  drivers/nfc/s3fwrn5/core.c | 1
-rw-r--r--  drivers/nfc/st-nci/se.c | 49
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/multipath.c | 3
-rw-r--r--  drivers/nvme/host/pci.c | 6
-rw-r--r--  drivers/nvme/target/auth.c | 2
-rw-r--r--  drivers/nvmem/lan9662-otpc.c | 4
-rw-r--r--  drivers/nvmem/rmem.c | 4
-rw-r--r--  drivers/nvmem/u-boot-env.c | 2
-rw-r--r--  drivers/of/property.c | 4
-rw-r--r--  drivers/parport/parport_pc.c | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 90
-rw-r--r--  drivers/pinctrl/devicetree.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 27
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 43
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.h | 6
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt2701.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt2712.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt6765.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt6779.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt6795.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7622.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7623.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7629.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7986.c | 2
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8127.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8135.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8167.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8173.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8183.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8186.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8188.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8192.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8195.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8365.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8516.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c | 3
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 40
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sc8280xp.c | 4
-rw-r--r--  drivers/platform/surface/aggregator/ssh_packet_layer.c | 24
-rw-r--r--  drivers/platform/surface/surface_aggregator_registry.c | 37
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 9
-rw-r--r--  drivers/platform/x86/amd/pmc.c | 3
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 2
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 3
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 37
-rw-r--r--  drivers/platform/x86/intel/pmc/pltdrv.c | 9
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 8
-rw-r--r--  drivers/power/supply/ab8500_btemp.c | 9
-rw-r--r--  drivers/power/supply/ip5xxx_power.c | 2
-rw-r--r--  drivers/power/supply/rk817_charger.c | 14
-rw-r--r--  drivers/regulator/core.c | 8
-rw-r--r--  drivers/regulator/rt5759-regulator.c | 1
-rw-r--r--  drivers/regulator/slg51000-regulator.c | 2
-rw-r--r--  drivers/regulator/twl6030-regulator.c | 17
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 2
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 43
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 2
-rw-r--r--  drivers/s390/block/dcssblk.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 31
-rw-r--r--  drivers/scsi/storvsc_drv.c | 69
-rw-r--r--  drivers/siox/siox-core.c | 2
-rw-r--r--  drivers/slimbus/Kconfig | 2
-rw-r--r--  drivers/slimbus/stream.c | 8
-rw-r--r--  drivers/soc/imx/soc-imx8m.c | 11
-rw-r--r--  drivers/spi/spi-dw-dma.c | 3
-rw-r--r--  drivers/spi/spi-imx.c | 13
-rw-r--r--  drivers/spi/spi-mt65xx.c | 8
-rw-r--r--  drivers/spi/spi-tegra210-quad.c | 3
-rw-r--r--  drivers/staging/rtl8192e/rtllib_softmac_wx.c | 9
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 3
-rw-r--r--  drivers/tee/optee/device.c | 2
-rw-r--r--  drivers/tty/n_gsm.c | 71
-rw-r--r--  drivers/tty/serial/8250/8250_lpss.c | 17
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 52
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 7
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 76
-rw-r--r--  drivers/tty/serial/imx.c | 1
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.c | 12
-rw-r--r--  drivers/usb/cdns3/cdnsp-ring.c | 17
-rw-r--r--  drivers/usb/cdns3/host.c | 56
-rw-r--r--  drivers/usb/chipidea/otg_fsm.c | 2
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/dwc3/core.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 11
-rw-r--r--  drivers/usb/dwc3/gadget.c | 17
-rw-r--r--  drivers/usb/dwc3/host.c | 10
-rw-r--r--  drivers/usb/gadget/function/uvc_v4l2.c | 72
-rw-r--r--  drivers/usb/host/bcma-hcd.c | 10
-rw-r--r--  drivers/usb/serial/option.c | 19
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 15
-rw-r--r--  drivers/usb/typec/tipd/core.c | 6
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c | 10
-rw-r--r--  drivers/vfio/vfio_main.c | 26
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 2
-rw-r--r--  drivers/virt/coco/sev-guest/sev-guest.c | 84
-rw-r--r--  drivers/xen/pcpu.c | 2
-rw-r--r--  drivers/xen/platform-pci.c | 10
-rw-r--r--  drivers/xen/xen-pciback/conf_space_capability.c | 9
-rw-r--r--  fs/afs/fs_probe.c | 4
-rw-r--r--  fs/afs/server.c | 2
-rw-r--r--  fs/btrfs/ctree.c | 36
-rw-r--r--  fs/btrfs/ioctl.c | 23
-rw-r--r--  fs/btrfs/qgroup.c | 9
-rw-r--r--  fs/btrfs/send.c | 24
-rw-r--r--  fs/btrfs/sysfs.c | 7
-rw-r--r--  fs/btrfs/tree-log.c | 59
-rw-r--r--  fs/btrfs/zoned.c | 9
-rw-r--r--  fs/ceph/caps.c | 48
-rw-r--r--  fs/ceph/inode.c | 2
-rw-r--r--  fs/ceph/snap.c | 3
-rw-r--r--  fs/cifs/cifsfs.c | 4
-rw-r--r--  fs/cifs/connect.c | 14
-rw-r--r--  fs/cifs/ioctl.c | 4
-rw-r--r--  fs/cifs/sess.c | 4
-rw-r--r--  fs/cifs/smb2ops.c | 4
-rw-r--r--  fs/erofs/fscache.c | 35
-rw-r--r--  fs/erofs/internal.h | 6
-rw-r--r--  fs/erofs/super.c | 39
-rw-r--r--  fs/erofs/sysfs.c | 8
-rw-r--r--  fs/erofs/zdata.c | 3
-rw-r--r--  fs/ext4/extents.c | 18
-rw-r--r--  fs/file.c | 11
-rw-r--r--  fs/fs-writeback.c | 30
-rw-r--r--  fs/fscache/volume.c | 7
-rw-r--r--  fs/fuse/file.c | 37
-rw-r--r--  fs/kernfs/dir.c | 14
-rw-r--r--  fs/ksmbd/vfs.c | 6
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/netfs/buffered_read.c | 20
-rw-r--r--  fs/netfs/io.c | 3
-rw-r--r--  fs/nfsd/trace.h | 5
-rw-r--r--  fs/nfsd/vfs.c | 11
-rw-r--r--  fs/nilfs2/dat.c | 7
-rw-r--r--  fs/nilfs2/sufile.c | 8
-rw-r--r--  fs/proc/meminfo.c | 2
-rw-r--r--  fs/read_write.c | 19
-rw-r--r--  fs/zonefs/super.c | 60
-rw-r--r--  fs/zonefs/sysfs.c | 5
-rw-r--r--  fs/zonefs/zonefs.h | 6
-rw-r--r--  include/asm-generic/tlb.h | 4
-rw-r--r--  include/linux/blkdev.h | 16
-rw-r--r--  include/linux/bpf.h | 60
-rw-r--r--  include/linux/can/platform/sja1000.h | 2
-rw-r--r--  include/linux/fault-inject.h | 7
-rw-r--r--  include/linux/fs.h | 8
-rw-r--r--  include/linux/fscache.h | 2
-rw-r--r--  include/linux/gfp.h | 18
-rw-r--r--  include/linux/io_uring.h | 3
-rw-r--r--  include/linux/kvm_host.h | 1
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mm.h | 29
-rw-r--r--  include/linux/mmc/mmc.h | 2
-rw-r--r--  include/linux/pgtable.h | 18
-rw-r--r--  include/linux/ring_buffer.h | 2
-rw-r--r--  include/linux/trace.h | 4
-rw-r--r--  include/linux/vfio.h | 1
-rw-r--r--  include/net/inet_hashtables.h | 3
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/ipv6.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/sctp/stream_sched.h | 2
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/soc/at91/sama7-ddr.h | 5
-rw-r--r--  include/sound/sof/dai.h | 2
-rw-r--r--  include/sound/sof/info.h | 4
-rw-r--r--  include/trace/events/huge_memory.h | 8
-rw-r--r--  include/uapi/linux/ip.h | 6
-rw-r--r--  include/uapi/linux/ipv6.h | 6
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  io_uring/filetable.c | 2
-rw-r--r--  io_uring/io_uring.c | 2
-rw-r--r--  io_uring/io_uring.h | 13
-rw-r--r--  io_uring/net.c | 23
-rw-r--r--  io_uring/poll.c | 59
-rw-r--r--  ipc/shm.c | 34
-rw-r--r--  kernel/bpf/bpf_local_storage.c | 2
-rw-r--r--  kernel/bpf/dispatcher.c | 28
-rw-r--r--  kernel/bpf/percpu_freelist.c | 23
-rw-r--r--  kernel/bpf/verifier.c | 14
-rw-r--r--  kernel/events/core.c | 65
-rw-r--r--  kernel/gcov/clang.c | 2
-rw-r--r--  kernel/kprobes.c | 8
-rw-r--r--  kernel/rseq.c | 19
-rw-r--r--  kernel/sched/core.c | 52
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 30
-rw-r--r--  kernel/trace/ftrace.c | 5
-rw-r--r--  kernel/trace/kprobe_event_gen_test.c | 48
-rw-r--r--  kernel/trace/rethook.c | 4
-rw-r--r--  kernel/trace/ring_buffer.c | 71
-rw-r--r--  kernel/trace/synth_event_gen_test.c | 16
-rw-r--r--  kernel/trace/trace.c | 23
-rw-r--r--  kernel/trace/trace.h | 1
-rw-r--r--  kernel/trace/trace_dynevent.c | 2
-rw-r--r--  kernel/trace/trace_eprobe.c | 8
-rw-r--r--  kernel/trace/trace_events.c | 13
-rw-r--r--  kernel/trace/trace_events_hist.c | 7
-rw-r--r--  kernel/trace/trace_events_synth.c | 7
-rw-r--r--  kernel/trace/trace_events_user.c | 4
-rw-r--r--  kernel/trace/trace_osnoise.c | 6
-rw-r--r--  kernel/trace/trace_syscalls.c | 2
-rw-r--r--  lib/Kconfig.debug | 10
-rw-r--r--  lib/fault-inject.c | 13
-rw-r--r--  lib/vdso/Makefile | 2
-rw-r--r--  mm/compaction.c | 22
-rw-r--r--  mm/damon/sysfs.c | 50
-rw-r--r--  mm/failslab.c | 12
-rw-r--r--  mm/hugetlb.c | 31
-rw-r--r--  mm/kfence/report.c | 13
-rw-r--r--  mm/khugepaged.c | 97
-rw-r--r--  mm/maccess.c | 2
-rw-r--r--  mm/madvise.c | 6
-rw-r--r--  mm/memcontrol.c | 2
-rw-r--r--  mm/memory.c | 27
-rw-r--r--  mm/migrate_device.c | 8
-rw-r--r--  mm/mmap.c | 5
-rw-r--r--  mm/mmu_gather.c | 4
-rw-r--r--  mm/page_alloc.c | 7
-rw-r--r--  mm/page_ext.c | 2
-rw-r--r--  mm/swapfile.c | 8
-rw-r--r--  mm/vmscan.c | 82
-rw-r--r--  net/9p/trans_fd.c | 28
-rw-r--r--  net/9p/trans_xen.c | 9
-rw-r--r--  net/bpf/test_run.c | 1
-rw-r--r--  net/bridge/br_vlan.c | 17
-rw-r--r--  net/caif/chnl_net.c | 3
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/lwtunnel.c | 4
-rw-r--r--  net/core/neighbour.c | 58
-rw-r--r--  net/dccp/ipv4.c | 23
-rw-r--r--  net/dccp/ipv6.c | 24
-rw-r--r--  net/dccp/proto.c | 3
-rw-r--r--  net/dsa/dsa2.c | 10
-rw-r--r--  net/dsa/dsa_priv.h | 1
-rw-r--r--  net/dsa/master.c | 3
-rw-r--r--  net/dsa/port.c | 16
-rw-r--r--  net/hsr/hsr_forward.c | 5
-rw-r--r--  net/ipv4/Kconfig | 10
-rw-r--r--  net/ipv4/af_inet.c | 11
-rw-r--r--  net/ipv4/esp4_offload.c | 3
-rw-r--r--  net/ipv4/fib_semantics.c | 8
-rw-r--r--  net/ipv4/fib_trie.c | 6
-rw-r--r--  net/ipv4/inet_hashtables.c | 94
-rw-r--r--  net/ipv4/ip_input.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 4
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 21
-rw-r--r--  net/ipv6/esp6_offload.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 20
-rw-r--r--  net/ipv6/xfrm6_policy.c | 6
-rw-r--r--  net/kcm/kcmsock.c | 58
-rw-r--r--  net/key/af_key.c | 34
-rw-r--r--  net/l2tp/l2tp_core.c | 22
-rw-r--r--  net/mac80211/airtime.c | 3
-rw-r--r--  net/mptcp/protocol.c | 13
-rw-r--r--  net/mptcp/subflow.c | 6
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 24
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 4
-rw-r--r--  net/netfilter/nf_tables_api.c | 6
-rw-r--r--  net/netfilter/nft_ct.c | 6
-rw-r--r--  net/netfilter/xt_connmark.c | 18
-rw-r--r--  net/nfc/nci/core.c | 2
-rw-r--r--  net/nfc/nci/data.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 8
-rw-r--r--  net/packet/af_packet.c | 6
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/conn_client.c | 38
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/act_connmark.c | 4
-rw-r--r--  net/sched/act_ct.c | 8
-rw-r--r--  net/sched/act_ctinfo.c | 6
-rw-r--r--  net/sctp/stream.c | 25
-rw-r--r--  net/sctp/stream_sched.c | 5
-rw-r--r--  net/sctp/stream_sched_prio.c | 19
-rw-r--r--  net/sctp/stream_sched_rr.c | 5
-rw-r--r--  net/tipc/crypto.c | 3
-rw-r--r--  net/tipc/discover.c | 5
-rw-r--r--  net/tipc/topsrv.c | 20
-rw-r--r--  net/tls/tls_device_fallback.c | 5
-rw-r--r--  net/wireless/scan.c | 10
-rw-r--r--  net/x25/x25_dev.c | 2
-rw-r--r--  net/xfrm/xfrm_device.c | 15
-rw-r--r--  net/xfrm/xfrm_replay.c | 2
-rw-r--r--  scripts/Makefile.package | 4
-rwxr-xr-x  scripts/faddr2line | 7
-rwxr-xr-x  scripts/package/mkdebian | 2
-rw-r--r--  sound/core/seq/seq_memory.c | 11
-rw-r--r--  sound/firewire/dice/dice-stream.c | 12
-rw-r--r--  sound/hda/intel-dsp-config.c | 5
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c | 7
-rw-r--r--  sound/soc/codecs/cs42l51.c | 2
-rw-r--r--  sound/soc/codecs/hdac_hda.h | 4
-rw-r--r--  sound/soc/codecs/max98373-i2c.c | 4
-rw-r--r--  sound/soc/codecs/rt5514-spi.c | 15
-rw-r--r--  sound/soc/codecs/rt5677-spi.c | 19
-rw-r--r--  sound/soc/codecs/rt711-sdca-sdw.c | 2
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 1
-rw-r--r--  sound/soc/codecs/tas2764.c | 19
-rw-r--r--  sound/soc/codecs/tas2770.c | 20
-rw-r--r--  sound/soc/codecs/tas2780.c | 19
-rw-r--r--  sound/soc/codecs/tlv320adc3xxx.c | 3
-rw-r--r--  sound/soc/codecs/wm8962.c | 8
-rw-r--r--  sound/soc/fsl/fsl_asrc.c | 2
-rw-r--r--  sound/soc/fsl/fsl_esai.c | 2
-rw-r--r--  sound/soc/fsl/fsl_micfil.c | 19
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 2
-rw-r--r--  sound/soc/intel/boards/bytcht_es8316.c | 7
-rw-r--r--  sound/soc/intel/boards/sof_es8336.c | 60
-rw-r--r--  sound/soc/intel/common/soc-acpi-intel-icl-match.c | 13
-rw-r--r--  sound/soc/soc-core.c | 17
-rw-r--r--  sound/soc/soc-dapm.c | 2
-rw-r--r--  sound/soc/soc-ops.c | 11
-rw-r--r--  sound/soc/soc-pcm.c | 7
-rw-r--r--  sound/soc/soc-utils.c | 2
-rw-r--r--  sound/soc/sof/ipc3-topology.c | 15
-rw-r--r--  sound/soc/sof/topology.c | 20
-rw-r--r--  sound/soc/stm/stm32_adfsdm.c | 11
-rw-r--r--  sound/soc/stm/stm32_i2s.c | 2
-rw-r--r--  sound/usb/midi.c | 4
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 8
-rw-r--r--  tools/iio/iio_generic_buffer.c | 4
-rw-r--r--  tools/lib/bpf/libbpf.c | 2
-rw-r--r--  tools/lib/bpf/libbpf_probes.c | 2
-rw-r--r--  tools/lib/bpf/ringbuf.c | 26
-rw-r--r--  tools/testing/selftests/bpf/map_tests/sk_storage_map.c | 36
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/varlen.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/test_varlen.c | 5
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 2
-rw-r--r--  tools/testing/selftests/kvm/.gitignore | 1
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 1
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h | 13
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c | 13
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c | 67
-rw-r--r--  tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c | 73
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh | 11
-rwxr-xr-x  tools/testing/selftests/net/fib_nexthops.sh | 11
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 6
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 9
-rwxr-xr-x  tools/testing/selftests/net/mptcp/simult_flows.sh | 5
-rwxr-xr-x  tools/testing/selftests/net/pmtu.sh | 10
-rwxr-xr-x  tools/testing/selftests/net/udpgro.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/udpgro_bench.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/udpgro_frglist.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/udpgro_fwd.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/veth.sh | 11
-rw-r--r--  tools/vm/slabinfo-gnuplot.sh | 4
-rw-r--r--  virt/kvm/kvm_main.c | 52
-rw-r--r--  virt/kvm/pfncache.c | 7
799 files changed, 6672 insertions, 3531 deletions
diff --git a/.mailmap b/.mailmap
index fdd7989492fc..bbb6a077d227 100644
--- a/.mailmap
+++ b/.mailmap
@@ -29,6 +29,7 @@ Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electr
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
Alex Shi <alexs@kernel.org> <alex.shi@linux.alibaba.com>
@@ -382,6 +383,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
Santosh Shilimkar <ssantosh@kernel.org>
Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
+Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
S.Çağlar Onur <caglar@pardus.org.tr>
Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
@@ -389,6 +391,7 @@ Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
+Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
@@ -416,6 +419,7 @@ TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
+Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
diff --git a/CREDITS b/CREDITS
index 54672cbcd719..198f675c419e 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2452,6 +2452,10 @@ S: 482 Shadowgraph Dr.
S: San Jose, CA 95110
S: USA
+N: Michal Marek
+E: michal.lkml@markovi.net
+D: Kbuild Maintainer 2009-2017
+
N: Martin Mares
E: mj@ucw.cz
W: http://www.ucw.cz/~mj/
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a465d5242774..42af9ca0127e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6959,3 +6959,14 @@
memory, and other data can't be written using
xmon commands.
off xmon is disabled.
+
+ amd_pstate= [X86]
+ disable
+ Do not enable amd_pstate as the default
+ scaling driver for the supported processors
+ passive
+ Use amd_pstate as a scaling driver, driver requests a
+ desired performance on this abstract scale and the power
+ management firmware translates the requests into actual
+ hardware states (core frequency, data fabric and memory
+ clocks etc.)
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 8f3d30c5a0d8..06e23538f79c 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -283,23 +283,19 @@ efficiency frequency management method on AMD processors.
Kernel Module Options for ``amd-pstate``
=========================================
-.. _shared_mem:
-
-``shared_mem``
-Use a module param (shared_mem) to enable related processors manually with
-**amd_pstate.shared_mem=1**.
-Due to the performance issue on the processors with `Shared Memory Support
-<perf_cap_>`_, we disable it presently and will re-enable this by default
-once we address performance issue with this solution.
-
-To check whether the current processor is using `Full MSR Support <perf_cap_>`_
-or `Shared Memory Support <perf_cap_>`_ : ::
-
- ray@hr-test1:~$ lscpu | grep cppc
- Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd cppc arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm
-
-If the CPU flags have ``cppc``, then this processor supports `Full MSR Support
-<perf_cap_>`_. Otherwise, it supports `Shared Memory Support <perf_cap_>`_.
+Passive Mode
+------------
+
+``amd_pstate=passive``
+
+It will be enabled if the ``amd_pstate=passive`` is passed to the kernel in the command line.
+In this mode, ``amd_pstate`` driver software specifies a desired QoS target in the CPPC
+performance scale as a relative number. This can be expressed as percentage of nominal
+performance (infrastructure max). Below the nominal sustained performance level,
+desired performance expresses the average performance level of the processor subject
+to the Performance Reduction Tolerance register. Above the nominal performance level,
+processor must provide at least nominal performance requested and go higher if current
+operating conditions allow.
``cpupower`` tool support for ``amd-pstate``
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
index 2ab4642679c0..55c4f94a14d1 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
@@ -148,7 +148,7 @@ allOf:
items:
- const: oscclk
- const: dout_clkcmu_fsys1_bus
- - const: dout_clkcmu_fsys1_mmc_card
+ - const: gout_clkcmu_fsys1_mmc_card
- const: dout_clkcmu_fsys1_usbdrd
- if:
diff --git a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml
index b283c8ca2bbf..5c08d8b6e995 100644
--- a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml
@@ -62,13 +62,6 @@ properties:
description:
Inform the driver that last channel will be used to sensor battery.
- aspeed,trim-data-valid:
- type: boolean
- description: |
- The ADC reference voltage can be calibrated to obtain the trimming
- data which will be stored in otp. This property informs the driver that
- the data store in the otp is valid.
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
index fe1c5016f7f3..1c191bc5a178 100644
--- a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
+++ b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
@@ -16,8 +16,11 @@ description:
properties:
compatible:
- items:
+ oneOf:
- const: goodix,gt7375p
+ - items:
+ - const: goodix,gt7986u
+ - const: goodix,gt7375p
reg:
enum:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 2684562df4d9..be29e0b80995 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -24,7 +24,7 @@ properties:
oneOf:
- items:
- enum:
- - qcom,sc7280-bwmon
+ - qcom,sc7280-cpu-bwmon
- qcom,sdm845-bwmon
- const: qcom,msm8998-bwmon
- const: qcom,msm8998-bwmon # BWMON v4
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml b/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
index 24d7bf21499e..9d44236f2deb 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
@@ -36,6 +36,9 @@ properties:
resets:
maxItems: 1
+ iommus:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -43,6 +46,7 @@ required:
- clocks
- clock-names
- resets
+ - iommus
additionalProperties: false
@@ -59,6 +63,7 @@ examples:
clocks = <&ccu CLK_BUS_VP9>, <&ccu CLK_VP9>;
clock-names = "bus", "mod";
resets = <&ccu RST_BUS_VP9>;
+ iommus = <&iommu 5>;
};
...
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
index c3e9f3485449..dea293f403d9 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -8,7 +8,7 @@ title: Audio codec controlled by ChromeOS EC
maintainers:
- Cheng-Yi Chiang <cychiang@chromium.org>
- - Tzung-Bi Shih <tzungbi@google.com>
+ - Tzung-Bi Shih <tzungbi@kernel.org>
description: |
Google's ChromeOS EC codec is a digital mic codec provided by the
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml b/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml
index 1d73204451b1..ea7d4900ee4a 100644
--- a/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml
+++ b/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek rt1015p codec devicetree bindings
maintainers:
- - Tzung-Bi Shih <tzungbi@google.com>
+ - Tzung-Bi Shih <tzungbi@kernel.org>
description: |
Rt1015p is a rt1015 variant which does not support I2C and
diff --git a/Documentation/driver-api/miscellaneous.rst b/Documentation/driver-api/miscellaneous.rst
index 304ffb146cf9..4a5104a368ac 100644
--- a/Documentation/driver-api/miscellaneous.rst
+++ b/Documentation/driver-api/miscellaneous.rst
@@ -16,12 +16,11 @@ Parallel Port Devices
16x50 UART Driver
=================
-.. kernel-doc:: drivers/tty/serial/serial_core.c
- :export:
-
.. kernel-doc:: drivers/tty/serial/8250/8250_core.c
:export:
+See serial/driver.rst for related APIs.
+
Pulse-Width Modulation (PWM)
============================
diff --git a/Documentation/networking/generic_netlink.rst b/Documentation/networking/generic_netlink.rst
index 59e04ccf80c1..d960dbd7e80e 100644
--- a/Documentation/networking/generic_netlink.rst
+++ b/Documentation/networking/generic_netlink.rst
@@ -6,4 +6,4 @@ Generic Netlink
A wiki document on how to use Generic Netlink can be found here:
- * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
+ * https://wiki.linuxfoundation.org/networking/generic_netlink_howto
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
index 922e0b547bc3..66b07f14714c 100644
--- a/Documentation/process/code-of-conduct-interpretation.rst
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
uncertain how to handle situations that come up. It will not be
considered a violation report unless you want it to be. If you are
uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Joanna Lee <joanna.lee@gesmer.com>.
+reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
In the end, "be kind to each other" is really what the end goal is for
everybody. We know everyone is human and we all fail at times, but the
diff --git a/Documentation/translations/zh_CN/loongarch/introduction.rst b/Documentation/translations/zh_CN/loongarch/introduction.rst
index 128878f5bb70..f3ec25b163d7 100644
--- a/Documentation/translations/zh_CN/loongarch/introduction.rst
+++ b/Documentation/translations/zh_CN/loongarch/introduction.rst
@@ -70,8 +70,8 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其
================= ================== =================== ==========
.. note::
- 注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是
- ``$a0`` 和 ``$a1`` 的别名,属于已经废弃的用法。
+ 注意:在一些遗留代码中有时可能见到 ``$fv0`` 和 ``$fv1`` ,它们是
+ ``$fa0`` 和 ``$fa1`` 的别名,属于已经废弃的用法。
向量寄存器
diff --git a/MAINTAINERS b/MAINTAINERS
index d5d6d5a68f4d..de4017953c39 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2197,7 +2197,7 @@ M: Wei Xu <xuwei5@hisilicon.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
W: http://www.hisilicon.com
-T: git git://github.com/hisilicon/linux-hisi.git
+T: git https://github.com/hisilicon/linux-hisi.git
F: arch/arm/boot/dts/hi3*
F: arch/arm/boot/dts/hip*
F: arch/arm/boot/dts/hisi*
@@ -4809,7 +4809,7 @@ R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
-T: git git://github.com/ceph/ceph-client.git
+T: git https://github.com/ceph/ceph-client.git
F: include/linux/ceph/
F: include/linux/crush/
F: net/ceph/
@@ -4821,7 +4821,7 @@ R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
-T: git git://github.com/ceph/ceph-client.git
+T: git https://github.com/ceph/ceph-client.git
F: Documentation/filesystems/ceph.rst
F: fs/ceph/
@@ -4911,7 +4911,7 @@ F: drivers/platform/chrome/
CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
-M: Tzung-Bi Shih <tzungbi@google.com>
+M: Tzung-Bi Shih <tzungbi@kernel.org>
R: Guenter Roeck <groeck@chromium.org>
L: chrome-platform@lists.linux.dev
S: Maintained
@@ -5585,8 +5585,6 @@ F: drivers/scsi/cxgbi/cxgb3i
CXGB4 CRYPTO DRIVER (chcr)
M: Ayush Sawal <ayush.sawal@chelsio.com>
-M: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
-M: Rohit Maheshwari <rohitm@chelsio.com>
L: linux-crypto@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5594,8 +5592,6 @@ F: drivers/crypto/chelsio
CXGB4 INLINE CRYPTO DRIVER
M: Ayush Sawal <ayush.sawal@chelsio.com>
-M: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
-M: Rohit Maheshwari <rohitm@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -10286,7 +10282,7 @@ T: git https://github.com/intel/gvt-linux.git
F: drivers/gpu/drm/i915/gvt/
INTEL HID EVENT DRIVER
-M: Alex Hung <alex.hung@canonical.com>
+M: Alex Hung <alexhung@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel/hid.c
@@ -11034,6 +11030,7 @@ KCONFIG
M: Masahiro Yamada <masahiroy@kernel.org>
L: linux-kbuild@vger.kernel.org
S: Maintained
+Q: https://patchwork.kernel.org/project/linux-kbuild/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
@@ -11091,10 +11088,12 @@ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <masahiroy@kernel.org>
-M: Michal Marek <michal.lkml@markovi.net>
+R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
+R: Nicolas Schier <nicolas@fjasle.eu>
L: linux-kbuild@vger.kernel.org
S: Maintained
+Q: https://patchwork.kernel.org/project/linux-kbuild/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
F: Documentation/kbuild/
F: Makefile
@@ -13624,6 +13623,12 @@ S: Supported
F: drivers/misc/atmel-ssc.c
F: include/linux/atmel-ssc.h
+MICROCHIP SOC DRIVERS
+M: Conor Dooley <conor@kernel.org>
+S: Supported
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: drivers/soc/microchip/
+
MICROCHIP USB251XB DRIVER
M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
@@ -15942,6 +15947,7 @@ Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+F: Documentation/devicetree/bindings/pci/
F: drivers/pci/controller/
F: drivers/pci/pci-bridge-emul.c
F: drivers/pci/pci-bridge-emul.h
@@ -16048,7 +16054,7 @@ F: Documentation/devicetree/bindings/pci/microchip*
F: drivers/pci/controller/*microchip*
PCIE DRIVER FOR QUALCOMM MSM
-M: Stanimir Varbanov <svarbanov@mm-sol.com>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -16138,7 +16144,8 @@ F: include/linux/peci-cpu.h
F: include/linux/peci.h
PENSANDO ETHERNET DRIVERS
-M: Shannon Nelson <snelson@pensando.io>
+M: Shannon Nelson <shannon.nelson@amd.com>
+M: Brett Creeley <brett.creeley@amd.com>
M: drivers@pensando.io
L: netdev@vger.kernel.org
S: Supported
@@ -17221,7 +17228,7 @@ R: Dongsheng Yang <dongsheng.yang@easystack.cn>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
-T: git git://github.com/ceph/ceph-client.git
+T: git https://github.com/ceph/ceph-client.git
F: Documentation/ABI/testing/sysfs-bus-rbd
F: drivers/block/rbd.c
F: drivers/block/rbd_types.h
@@ -17474,10 +17481,8 @@ S: Maintained
F: drivers/net/wireless/realtek/rtw89/
REDPINE WIRELESS DRIVER
-M: Amitkumar Karwar <amitkarwar@gmail.com>
-M: Siva Rebbagondla <siva8118@gmail.com>
L: linux-wireless@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/wireless/rsi/
REGISTER MAP ABSTRACTION
@@ -17722,7 +17727,7 @@ F: arch/riscv/
N: riscv
K: riscv
-RISC-V/MICROCHIP POLARFIRE SOC SUPPORT
+RISC-V MICROCHIP FPGA SUPPORT
M: Conor Dooley <conor.dooley@microchip.com>
M: Daire McNamara <daire.mcnamara@microchip.com>
L: linux-riscv@lists.infradead.org
@@ -17740,17 +17745,26 @@ F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
F: arch/riscv/boot/dts/microchip/
F: drivers/char/hw_random/mpfs-rng.c
F: drivers/clk/microchip/clk-mpfs.c
-F: drivers/i2c/busses/i2c-microchip-core.c
+F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
F: drivers/reset/reset-mpfs.c
F: drivers/rtc/rtc-mpfs.c
-F: drivers/soc/microchip/
+F: drivers/soc/microchip/mpfs-sys-controller.c
F: drivers/spi/spi-microchip-core-qspi.c
F: drivers/spi/spi-microchip-core.c
F: drivers/usb/musb/mpfs.c
F: include/soc/microchip/mpfs.h
+RISC-V MISC SOC SUPPORT
+M: Conor Dooley <conor@kernel.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+Q: https://patchwork.kernel.org/project/linux-riscv/list/
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: Documentation/devicetree/bindings/riscv/
+F: arch/riscv/boot/dts/
+
RNBD BLOCK DRIVERS
M: Md. Haris Iqbal <haris.iqbal@ionos.com>
M: Jack Wang <jinpu.wang@ionos.com>
@@ -17991,7 +18005,7 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/savage/
-S390
+S390 ARCHITECTURE
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
M: Alexander Gordeev <agordeev@linux.ibm.com>
@@ -18046,6 +18060,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/s390/net/
+S390 MM
+M: Alexander Gordeev <agordeev@linux.ibm.com>
+M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+L: linux-s390@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
+F: arch/s390/include/asm/pgtable.h
+F: arch/s390/mm
+
S390 PCI SUBSYSTEM
M: Niklas Schnelle <schnelle@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
@@ -18777,7 +18800,6 @@ M: Palmer Dabbelt <palmer@dabbelt.com>
M: Paul Walmsley <paul.walmsley@sifive.com>
L: linux-riscv@lists.infradead.org
S: Supported
-T: git https://github.com/sifive/riscv-linux.git
N: sifive
K: [^@]sifive
@@ -18796,6 +18818,13 @@ S: Maintained
F: Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
F: drivers/dma/sf-pdma/
+SIFIVE SOC DRIVERS
+M: Conor Dooley <conor@kernel.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: drivers/soc/sifive/
+
SILEAD TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
@@ -19597,6 +19626,11 @@ M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
F: drivers/net/ethernet/adaptec/starfire*
+STARFIVE DEVICETREES
+M: Emil Renner Berthing <kernel@esmil.dk>
+S: Maintained
+F: arch/riscv/boot/dts/starfive/
+
STARFIVE JH7100 CLOCK DRIVERS
M: Emil Renner Berthing <kernel@esmil.dk>
S: Maintained
diff --git a/Makefile b/Makefile
index 58cd4f5e1c3a..0992f827888d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index dae448040a97..947497413977 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -12,22 +12,20 @@
compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */
- regulators {
- vcc3v3: fixedregulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- };
+ vcc3v3: fixedregulator1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
- vcc1v8: fixedregulator@2 {
- compatible = "regulator-fixed";
- regulator-name = "vcc1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- };
+ vcc1v8: fixedregulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
};
/* User IO */
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 7a113325abb9..6f9004ebf424 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -666,7 +666,7 @@
compatible = "atmel,at91rm9200-udc";
reg = <0xfffb0000 0x4000>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>;
clock-names = "pclk", "hclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 60d61291f344..024af2db638e 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -39,6 +39,13 @@
};
+ usb1 {
+ pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
+ atmel,pins =
+ <AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */
+ };
+ };
+
mmc0_slot1 {
pinctrl_board_mmc0_slot1: mmc0_slot1-board {
atmel,pins =
@@ -84,6 +91,8 @@
};
usb1: gadget@fffa4000 {
+ pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
+ pinctrl-names = "default";
atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts
index b4605edfd2ab..d8fa83effd63 100644
--- a/arch/arm/boot/dts/imx6q-prti6q.dts
+++ b/arch/arm/boot/dts/imx6q-prti6q.dts
@@ -364,8 +364,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wifi>;
interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>;
- ref-clock-frequency = "38400000";
- tcxo-clock-frequency = "19200000";
+ ref-clock-frequency = <38400000>;
+ tcxo-clock-frequency = <19200000>;
};
};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 0fc9e6b8b05d..03d2e8544a4e 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -1270,10 +1270,10 @@
clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
};
- gpmi: nand-controller@33002000{
+ gpmi: nand-controller@33002000 {
compatible = "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/lan966x-pcb8291.dts b/arch/arm/boot/dts/lan966x-pcb8291.dts
index f4f054cdf2a8..3a3d76af8612 100644
--- a/arch/arm/boot/dts/lan966x-pcb8291.dts
+++ b/arch/arm/boot/dts/lan966x-pcb8291.dts
@@ -69,6 +69,12 @@
pins = "GPIO_35", "GPIO_36";
function = "can0_b";
};
+
+ sgpio_a_pins: sgpio-a-pins {
+ /* SCK, D0, D1, LD */
+ pins = "GPIO_32", "GPIO_33", "GPIO_34", "GPIO_35";
+ function = "sgpio_a";
+ };
};
&can0 {
@@ -118,6 +124,20 @@
status = "okay";
};
+&sgpio {
+ pinctrl-0 = <&sgpio_a_pins>;
+ pinctrl-names = "default";
+ microchip,sgpio-port-ranges = <0 3>, <8 11>;
+ status = "okay";
+
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
&switch {
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
index 9fd4d9db9f8f..becdc0b664bf 100644
--- a/arch/arm/boot/dts/rk3036-evb.dts
+++ b/arch/arm/boot/dts/rk3036-evb.dts
@@ -35,11 +35,10 @@
&i2c1 {
status = "okay";
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
};
};
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
index cfa318a506eb..2db5ba706208 100644
--- a/arch/arm/boot/dts/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -32,7 +32,7 @@
keyup-threshold-microvolt = <2500000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <0>;
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index e7cf18823558..118deacd38c4 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -71,7 +71,7 @@
#sound-dai-cells = <0>;
};
- ir_recv: gpio-ir-receiver {
+ ir_recv: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index cdd4a0bd5133..44b54af0bbf9 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -379,7 +379,7 @@
rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
- lcdc1_rgb24: ldcd1-rgb24 {
+ lcdc1_rgb24: lcdc1-rgb24 {
rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
<2 RK_PA1 1 &pcfg_pull_none>,
<2 RK_PA2 1 &pcfg_pull_none>,
@@ -607,7 +607,6 @@
&global_timer {
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
- status = "disabled";
};
&local_timer {
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index be695b8c1f67..8a635c243127 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -54,7 +54,7 @@
vin-supply = <&vcc_sys>;
};
- hym8563@51 {
+ rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 399d6b9c5fd4..382d2839cf47 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -28,19 +28,19 @@
press-threshold-microvolt = <300000>;
};
- menu {
+ button-menu {
label = "Menu";
linux,code = <KEY_MENU>;
press-threshold-microvolt = <640000>;
};
- esc {
+ button-esc {
label = "Esc";
linux,code = <KEY_ESC>;
press-threshold-microvolt = <1000000>;
};
- home {
+ button-home {
label = "Home";
linux,code = <KEY_HOME>;
press-threshold-microvolt = <1300000>;
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 052afe5543e2..3836c61cfb76 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -233,11 +233,10 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
interrupt-parent = <&gpio7>;
interrupts = <RK_PA4 IRQ_TYPE_EDGE_FALLING>;
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index 713f55e143c6..db1eb648e0e1 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -162,11 +162,10 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
};
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index 80e0f07c8e87..13cfdaa95cc7 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -165,11 +165,10 @@
};
&i2c0 {
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
interrupt-parent = <&gpio0>;
interrupts = <RK_PA4 IRQ_TYPE_EDGE_FALLING>;
diff --git a/arch/arm/boot/dts/rk3288-vmarc-som.dtsi b/arch/arm/boot/dts/rk3288-vmarc-som.dtsi
index 0ae2bd150e37..793951655b73 100644
--- a/arch/arm/boot/dts/rk3288-vmarc-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-vmarc-som.dtsi
@@ -241,7 +241,6 @@
interrupt-parent = <&gpio5>;
interrupts = <RK_PC3 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "hym8563";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index bf285091a9eb..cb4e42ede56a 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -76,6 +76,13 @@
reg = <0x1013c200 0x20>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
+ status = "disabled";
+ /* The clock source and the sched_clock provided by the arm_global_timer
+ * on Rockchip rk3066a/rk3188 are quite unstable because their rates
+ * depend on the CPU frequency.
+ * Keep the arm_global_timer disabled in order to have the
+ * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
+ */
};
local_timer: local-timer@1013c600 {
diff --git a/arch/arm/boot/dts/sama7g5-pinfunc.h b/arch/arm/boot/dts/sama7g5-pinfunc.h
index 4eb30445d205..6e87f0d4b8fc 100644
--- a/arch/arm/boot/dts/sama7g5-pinfunc.h
+++ b/arch/arm/boot/dts/sama7g5-pinfunc.h
@@ -261,7 +261,7 @@
#define PIN_PB2__FLEXCOM6_IO0 PINMUX_PIN(PIN_PB2, 2, 1)
#define PIN_PB2__ADTRG PINMUX_PIN(PIN_PB2, 3, 1)
#define PIN_PB2__A20 PINMUX_PIN(PIN_PB2, 4, 1)
-#define PIN_PB2__FLEXCOM11_IO0 PINMUX_PIN(PIN_PB2, 6, 3)
+#define PIN_PB2__FLEXCOM11_IO1 PINMUX_PIN(PIN_PB2, 6, 3)
#define PIN_PB3 35
#define PIN_PB3__GPIO PINMUX_PIN(PIN_PB3, 0, 0)
#define PIN_PB3__RF1 PINMUX_PIN(PIN_PB3, 1, 1)
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index fe87397c3d8c..bdbc1e590891 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
- (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index d16aba48fa0a..090011394477 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -45,12 +45,6 @@
typedef pte_t *pte_addr_t;
/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
-/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) (prot)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 78a532068fec..ef48a55e9af8 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
+
#ifndef CONFIG_MMU
#include <asm-generic/pgtable-nopud.h>
@@ -139,13 +148,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
*/
#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index ffed4d949042..e4904faf1753 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -169,10 +169,15 @@ sr_ena_2:
cmp tmp1, #UDDRC_STAT_SELFREF_TYPE_SW
bne sr_ena_2
- /* Put DDR PHY's DLL in bypass mode for non-backup modes. */
+ /* Disable DX DLLs for non-backup modes. */
cmp r7, #AT91_PM_BACKUP
beq sr_ena_3
+ /* Do not soft reset the AC DLL. */
+ ldr tmp1, [r3, DDR3PHY_ACDLLCR]
+ bic tmp1, tmp1, DDR3PHY_ACDLLCR_DLLSRST
+ str tmp1, [r3, DDR3PHY_ACDLLCR]
+
/* Disable DX DLLs. */
ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 25c9d184fa4c..1c57ac401649 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -393,8 +393,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
- if (ret)
+ if (ret) {
+ kfree(soc_dev_attr);
return;
+ }
soc_dev_attr->family = "Freescale MXS Family";
soc_dev_attr->soc_id = mxs_get_soc_id();
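
The mach-mxs hunk above fixes a leak on the early-return path: soc_dev_attr has already been allocated when the "model" lookup fails, so the error branch must free it before returning. A minimal standalone sketch of the same pattern, with invented names and a stubbed-out lookup (no relation to the real kernel APIs), follows.

#include <stdlib.h>

/* Stand-ins for the kernel structures and lookups; purely illustrative. */
struct soc_attr {
	const char *machine;
};

static int read_model(struct soc_attr *attr, int fail)
{
	if (fail)
		return -1;		/* lookup failed */
	attr->machine = "example-board";
	return 0;
}

static struct soc_attr *init_soc_attr(int fail)
{
	struct soc_attr *attr = calloc(1, sizeof(*attr));

	if (!attr)
		return NULL;

	if (read_model(attr, fail)) {
		free(attr);		/* the fix: no leak on the early return */
		return NULL;
	}
	return attr;
}

int main(void)
{
	free(init_soc_attr(0));		/* success path */
	free(init_soc_attr(1));		/* failure path: free(NULL) is a no-op */
	return 0;
}
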
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index c42debaded95..c1494a4dee25 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -26,6 +26,13 @@
unsigned long vectors_base;
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
*/
void __init paging_init(const struct machine_desc *mdesc)
{
+ void *zero_page;
+
early_trap_init((void *)vectors_base);
mpu_setup();
+
+ /* allocate the zero page. */
+ zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ flush_dcache_page(empty_zero_page);
}
/*
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 53f6660656ac..ca1d287a0a01 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -161,6 +161,7 @@
clocks = <&ccu CLK_BUS_VP9>, <&ccu CLK_VP9>;
clock-names = "bus", "mod";
resets = <&ccu RST_BUS_VP9>;
+ iommus = <&iommu 5>;
};
video-codec@1c0e000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
index 7e0aeb2db305..a0aeac619929 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
@@ -34,11 +34,25 @@
off-on-delay-us = <12000>;
};
- extcon_usbotg1: extcon-usbotg1 {
- compatible = "linux,extcon-usb-gpio";
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ label = "X19";
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usb1_extcon>;
- id-gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&pinctrl_usb1_connector>;
+ id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usb_dr_connector: endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
+ };
};
};
@@ -105,13 +119,19 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg1>;
dr_mode = "otg";
- extcon = <&extcon_usbotg1>;
srp-disable;
hnp-disable;
adp-disable;
power-active-high;
over-current-active-low;
+ usb-role-switch;
status = "okay";
+
+ port {
+ usb1_drd_sw: endpoint {
+ remote-endpoint = <&usb_dr_connector>;
+ };
+ };
};
&usbotg2 {
@@ -231,7 +251,7 @@
<MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x84>;
};
- pinctrl_usb1_extcon: usb1-extcongrp {
+ pinctrl_usb1_connector: usb1-connectorgrp {
fsl,pins = <MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x1c0>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index dabd94dc30c4..50ef92915c67 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -1244,10 +1244,10 @@
clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
};
- gpmi: nand-controller@33002000{
+ gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index ad0b99adf691..67b554ba690c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -1102,7 +1102,7 @@
gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 9f1469db554d..b4c1ef2559f2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -544,14 +544,14 @@
pinctrl_pcie0: pcie0grp {
fsl,pins = <
- MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x61 /* open drain, pull up */
- MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x41
+ MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x60 /* open drain, pull up */
+ MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x40
>;
};
pinctrl_pcie0_reg: pcie0reggrp {
fsl,pins = <
- MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x41
+ MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x40
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-pinfunc.h b/arch/arm64/boot/dts/freescale/imx93-pinfunc.h
index 4298a145f8a9..4298a145f8a9 100755..100644
--- a/arch/arm64/boot/dts/freescale/imx93-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx93-pinfunc.h
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index a47acf9bdf24..a721cdd80489 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -668,7 +668,7 @@
apcs_glb: mailbox@b111000 {
compatible = "qcom,ipq8074-apcs-apps-global";
- reg = <0x0b111000 0x6000>;
+ reg = <0x0b111000 0x1000>;
#clock-cells = <1>;
#mbox-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index c0a2baffa49d..aba717644391 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -3504,7 +3504,7 @@
};
saw3: syscon@9a10000 {
- compatible = "qcom,tcsr-msm8996", "syscon";
+ compatible = "syscon";
reg = <0x09a10000 0x1000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
index 87ab0e1ecd16..4dee790f1049 100644
--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
@@ -43,7 +43,6 @@
regulator-always-on;
regulator-boot-on;
- regulator-allow-set-load;
vin-supply = <&vreg_3p3>;
};
@@ -137,6 +136,9 @@
regulator-max-microvolt = <880000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7a_1p8: ldo7 {
@@ -152,6 +154,9 @@
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11a_0p8: ldo11 {
@@ -258,6 +263,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_1p8: ldo7 {
@@ -273,6 +281,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_3p3: ldo10 {
diff --git a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
index b608b82dff03..2c62ba6a49c5 100644
--- a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
+++ b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
@@ -83,6 +83,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4c: ldo4 {
@@ -98,6 +101,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c: ldo7 {
@@ -113,6 +119,9 @@
regulator-max-microvolt = <2504000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l17c: ldo17 {
@@ -121,6 +130,9 @@
regulator-max-microvolt = <2504000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 212580316d3e..4cdc88d33944 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2296,7 +2296,8 @@
lpass_audiocc: clock-controller@3300000 {
compatible = "qcom,sc7280-lpassaudiocc";
- reg = <0 0x03300000 0 0x30000>;
+ reg = <0 0x03300000 0 0x30000>,
+ <0 0x032a9000 0 0x1000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&lpass_aon LPASS_AON_CC_MAIN_RCG_CLK_SRC>;
clock-names = "bi_tcxo", "lpass_aon_cc_main_rcg_clk_src";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index fea7d8273ccd..5e30349efd20 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -124,6 +124,9 @@
regulator-max-microvolt = <2504000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13c: ldo13 {
@@ -146,6 +149,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4d: ldo4 {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index c32bcded2aef..212d63d5cbf2 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -885,13 +885,13 @@
ufs_mem_phy: phy@1d87000 {
compatible = "qcom,sc8280xp-qmp-ufs-phy";
- reg = <0 0x01d87000 0 0xe10>;
+ reg = <0 0x01d87000 0 0x1c8>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
clock-names = "ref",
"ref_aux";
- clocks = <&rpmhcc RPMH_CXO_CLK>,
+ clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
<&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
resets = <&ufs_mem_hc 0>;
@@ -953,13 +953,13 @@
ufs_card_phy: phy@1da7000 {
compatible = "qcom,sc8280xp-qmp-ufs-phy";
- reg = <0 0x01da7000 0 0xe10>;
+ reg = <0 0x01da7000 0 0x1c8>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
clock-names = "ref",
"ref_aux";
- clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>,
+ clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
<&gcc GCC_UFS_CARD_PHY_AUX_CLK>;
resets = <&ufs_card_hc 0>;
@@ -1181,26 +1181,16 @@
usb_0_ssphy: usb3-phy@88eb400 {
reg = <0 0x088eb400 0 0x100>,
<0 0x088eb600 0 0x3ec>,
- <0 0x088ec400 0 0x1f0>,
+ <0 0x088ec400 0 0x364>,
<0 0x088eba00 0 0x100>,
<0 0x088ebc00 0 0x3ec>,
- <0 0x088ec700 0 0x64>;
+ <0 0x088ec200 0 0x18>;
#phy-cells = <0>;
#clock-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb0_phy_pipe_clk_src";
};
-
- usb_0_dpphy: dp-phy@88ed200 {
- reg = <0 0x088ed200 0 0x200>,
- <0 0x088ed400 0 0x200>,
- <0 0x088eda00 0 0x200>,
- <0 0x088ea600 0 0x200>,
- <0 0x088ea800 0 0x200>;
- #clock-cells = <1>;
- #phy-cells = <0>;
- };
};
usb_1_hsphy: phy@8902000 {
@@ -1242,8 +1232,8 @@
usb_1_ssphy: usb3-phy@8903400 {
reg = <0 0x08903400 0 0x100>,
- <0 0x08903c00 0 0x3ec>,
- <0 0x08904400 0 0x1f0>,
+ <0 0x08903600 0 0x3ec>,
+ <0 0x08904400 0 0x364>,
<0 0x08903a00 0 0x100>,
<0 0x08903c00 0 0x3ec>,
<0 0x08904200 0 0x18>;
@@ -1253,16 +1243,6 @@
clock-names = "pipe0";
clock-output-names = "usb1_phy_pipe_clk_src";
};
-
- usb_1_dpphy: dp-phy@8904200 {
- reg = <0 0x08904200 0 0x200>,
- <0 0x08904400 0 0x200>,
- <0 0x08904a00 0 0x200>,
- <0 0x08904600 0 0x200>,
- <0 0x08904800 0 0x200>;
- #clock-cells = <1>;
- #phy-cells = <0>;
- };
};
system-cache-controller@9200000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
index 014fe3a31548..fb6e5a140c9f 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
@@ -348,6 +348,9 @@
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_3p0: ldo7 {
@@ -367,6 +370,9 @@
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_3p3: ldo10 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
index 549e0a2aa9fe..5428aab3058d 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
@@ -317,6 +317,9 @@
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_2p85: ldo7 {
@@ -339,6 +342,9 @@
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_3p3: ldo10 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index a5b62cadb129..e276eed1f8e2 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -334,6 +334,7 @@
exit-latency-us = <6562>;
min-residency-us = <9987>;
local-timer-stop;
+ status = "disabled";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
index 0fcf5bd88fc7..69ae6503c2f6 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
@@ -107,6 +107,9 @@
regulator-max-microvolt = <888000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6b_1p2: ldo6 {
@@ -115,6 +118,9 @@
regulator-max-microvolt = <1208000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7b_2p96: ldo7 {
@@ -123,6 +129,9 @@
regulator-max-microvolt = <2504000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9b_1p2: ldo9 {
@@ -131,6 +140,9 @@
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
regulator-allow-set-load;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/px30-evb.dts b/arch/arm64/boot/dts/rockchip/px30-evb.dts
index 07008d84434c..c1bbd555f5f5 100644
--- a/arch/arm64/boot/dts/rockchip/px30-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-evb.dts
@@ -30,31 +30,31 @@
keyup-threshold-microvolt = <1800000>;
poll-interval = <100>;
- esc-key {
+ button-esc {
label = "esc";
linux,code = <KEY_ESC>;
press-threshold-microvolt = <1310000>;
};
- home-key {
+ button-home {
label = "home";
linux,code = <KEY_HOME>;
press-threshold-microvolt = <624000>;
};
- menu-key {
+ button-menu {
label = "menu";
linux,code = <KEY_MENU>;
press-threshold-microvolt = <987000>;
};
- vol-down-key {
+ button-down {
label = "volume down";
linux,code = <KEY_VOLUMEDOWN>;
press-threshold-microvolt = <300000>;
};
- vol-up-key {
+ button-up {
label = "volume up";
linux,code = <KEY_VOLUMEUP>;
press-threshold-microvolt = <17000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-evb.dts b/arch/arm64/boot/dts/rockchip/rk3308-evb.dts
index 9fe9b0d11003..184b84fdde07 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-evb.dts
@@ -23,7 +23,7 @@
poll-interval = <100>;
keyup-threshold-microvolt = <1800000>;
- func-key {
+ button-func {
linux,code = <KEY_FN>;
label = "function";
press-threshold-microvolt = <18000>;
@@ -37,31 +37,31 @@
poll-interval = <100>;
keyup-threshold-microvolt = <1800000>;
- esc-key {
+ button-esc {
linux,code = <KEY_MICMUTE>;
label = "micmute";
press-threshold-microvolt = <1130000>;
};
- home-key {
+ button-home {
linux,code = <KEY_MODE>;
label = "mode";
press-threshold-microvolt = <901000>;
};
- menu-key {
+ button-menu {
linux,code = <KEY_PLAY>;
label = "play";
press-threshold-microvolt = <624000>;
};
- vol-down-key {
+ button-down {
linux,code = <KEY_VOLUMEDOWN>;
label = "volume down";
press-threshold-microvolt = <300000>;
};
- vol-up-key {
+ button-up {
linux,code = <KEY_VOLUMEUP>;
label = "volume up";
press-threshold-microvolt = <18000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index ea6820902ede..7ea48167747c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -19,7 +19,7 @@
stdout-path = "serial2:1500000n8";
};
- ir_rx {
+ ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
index 43c928ac98f0..1deef53a4c94 100644
--- a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
@@ -25,7 +25,7 @@
keyup-threshold-microvolt = <1800000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <17000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 7f5bba0c6001..81d1064fdb21 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -208,11 +208,10 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
/* rtc_int is not connected */
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 38d757c00548..5589f3db6b36 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -192,11 +192,10 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
/* rtc_int is not connected */
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index ed3348b558f8..a47d9f758611 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -734,10 +734,6 @@ camera: &i2c7 {
};
/* PINCTRL OVERRIDES */
-&ec_ap_int_l {
- rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
-};
-
&ap_fw_wp {
rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index 2a332763c35c..9d9297bc5f04 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -123,7 +123,7 @@
keyup-threshold-microvolt = <1800000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "Recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <18000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
index 452728b82e42..3bf8f959e42c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
@@ -39,7 +39,7 @@
keyup-threshold-microvolt = <1800000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "Recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <18000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
index 72182c58cc46..65cb21837b0c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
@@ -19,7 +19,7 @@
keyup-threshold-microvolt = <1500000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "Recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <18000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index 278123b4f911..b6e082f1f6d9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -167,6 +167,7 @@
};
&emmc_phy {
+ rockchip,enable-strobe-pulldown;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index 9e2e246e0bab..dba4d03bfc2b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -52,13 +52,13 @@
press-threshold-microvolt = <300000>;
};
- back {
+ button-back {
label = "Back";
linux,code = <KEY_BACK>;
press-threshold-microvolt = <985000>;
};
- menu {
+ button-menu {
label = "Menu";
linux,code = <KEY_MENU>;
press-threshold-microvolt = <1314000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 04c752f49be9..115c14c0a3c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -207,7 +207,7 @@
cap-sd-highspeed;
cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
disable-wp;
- max-frequency = <150000000>;
+ max-frequency = <40000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
vmmc-supply = <&vcc3v3_baseboard>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
index 5a2661ae0131..7ba1c28f70a9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
@@ -98,13 +98,12 @@
};
&i2c0 {
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
interrupt-parent = <&gpio0>;
interrupts = <RK_PA5 IRQ_TYPE_EDGE_FALLING>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "xin32k";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index 2f4b1b2e3ac7..bbf1e3f24585 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -41,7 +41,7 @@
keyup-threshold-microvolt = <1500000>;
poll-interval = <100>;
- recovery {
+ button-recovery {
label = "Recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <18000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 645ced6617a6..1f76d3501bda 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -509,7 +509,6 @@
&i2s1 {
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
- status = "okay";
};
&i2s2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 13927e7d0724..dbec2b7173a0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -33,13 +33,13 @@
press-threshold-microvolt = <300000>;
};
- back {
+ button-back {
label = "Back";
linux,code = <KEY_BACK>;
press-threshold-microvolt = <985000>;
};
- menu {
+ button-menu {
label = "Menu";
linux,code = <KEY_MENU>;
press-threshold-microvolt = <1314000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
index 935b8c68a71d..bf9eb0405b62 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
@@ -297,11 +297,10 @@
clock-frequency = <400000>;
status = "okay";
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "hym8563";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
index 0d45868132b9..8d61f824c12d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
@@ -23,7 +23,7 @@
io-channel-names = "buttons";
keyup-threshold-microvolt = <1750000>;
- recovery {
+ button-recovery {
label = "recovery";
linux,code = <KEY_VENDOR>;
press-threshold-microvolt = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index a05460b92415..25a8c781f4e7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -740,7 +740,7 @@
&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn>;
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
status = "okay";
uart-has-rtscts;
@@ -748,13 +748,14 @@
compatible = "brcm,bcm43438-bt";
clocks = <&rk817 1>;
clock-names = "lpo";
- device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
- host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
vbat-supply = <&vcc_sys>;
vddio-supply = <&vcca1v8_pmu>;
+ max-speed = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 77b179cd20e7..b276eb0810c7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -246,7 +246,7 @@
compatible = "rockchip,rk809";
reg = <0x20>;
interrupt-parent = <&gpio0>;
- interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
clock-names = "mclk";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
index dba648c2f57e..9fd262334d77 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
@@ -142,7 +142,7 @@
assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;
assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;
clock_in_out = "input";
- phy-mode = "rgmii-id";
+ phy-mode = "rgmii";
phy-supply = <&vcc_3v3>;
pinctrl-names = "default";
pinctrl-0 = <&gmac1m0_miim
@@ -432,11 +432,7 @@
&i2c3 {
pinctrl-names = "default";
- pinctrl-0 = <&i2c3m1_xfer>;
- status = "okay";
-};
-
-&i2c5 {
+ pinctrl-0 = <&i2c3m0_xfer>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index c282f6e79960..26d7fda275ed 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -500,7 +500,6 @@
interrupt-parent = <&gpio0>;
interrupts = <RK_PD3 IRQ_TYPE_EDGE_FALLING>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "rtcic_32kout";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index fb87a168fe96..539ef8cc7792 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -509,7 +509,6 @@
interrupt-parent = <&gpio0>;
interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <0>;
- clock-frequency = <32768>;
clock-output-names = "rtcic_32kout";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 925364586672..108b115dbf5b 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,16 +14,8 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
#else
#define efi_init()
-
-static inline
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
-{
- return false;
-}
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 71a1af42f0e8..edf6625ce965 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -863,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
static inline bool pmd_user_accessible_page(pmd_t pmd)
{
- return pmd_present(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+ return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}
static inline bool pud_user_accessible_page(pud_t pud)
{
- return pud_present(pud) && pud_user(pud);
+ return pud_leaf(pud) && pud_user(pud);
}
#endif
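
The pgtable.h hunk above tightens "present" to "leaf": a present PMD/PUD may merely point at a next-level table, and only a leaf entry actually maps a (potentially user-accessible) page. A toy model of that distinction, with invented bit names that do not reflect the real arm64 descriptor layout:

#include <stdbool.h>
#include <stdio.h>

/* Invented entry bits, for illustration only. */
#define E_VALID	(1u << 0)	/* entry is present              */
#define E_TABLE	(1u << 1)	/* entry points to a lower table */
#define E_USER	(1u << 2)	/* entry carries user permission */

static bool entry_present(unsigned int e)
{
	return e & E_VALID;
}

static bool entry_leaf(unsigned int e)
{
	/* A leaf is present and does not reference another table level. */
	return entry_present(e) && !(e & E_TABLE);
}

static bool user_accessible_page(unsigned int e)
{
	/*
	 * Old check: entry_present(e) && (e & E_USER) -- also true for a
	 * table entry.  New check: only leaf mappings qualify.
	 */
	return entry_leaf(e) && (e & E_USER);
}

int main(void)
{
	unsigned int table_entry = E_VALID | E_TABLE | E_USER;
	unsigned int leaf_entry  = E_VALID | E_USER;

	printf("table entry user-accessible? %d\n", user_accessible_page(table_entry));
	printf("leaf entry  user-accessible? %d\n", user_accessible_page(leaf_entry));
	return 0;
}
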
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index 67babd5f04c2..75691a2641c1 100644
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -6,7 +6,7 @@
#include <linux/linkage.h>
SYM_FUNC_START(__efi_rt_asm_wrapper)
- stp x29, x30, [sp, #-112]!
+ stp x29, x30, [sp, #-32]!
mov x29, sp
/*
@@ -17,20 +17,6 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x1, x18, [sp, #16]
/*
- * Preserve all callee saved registers and record the stack pointer
- * value in a per-CPU variable so we can recover from synchronous
- * exceptions occurring while running the firmware routines.
- */
- stp x19, x20, [sp, #32]
- stp x21, x22, [sp, #48]
- stp x23, x24, [sp, #64]
- stp x25, x26, [sp, #80]
- stp x27, x28, [sp, #96]
-
- adr_this_cpu x8, __efi_rt_asm_recover_sp, x9
- str x29, [x8]
-
- /*
* We are lucky enough that no EFI runtime services take more than
* 5 arguments, so all are passed in registers rather than via the
* stack.
@@ -45,7 +31,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
ldp x1, x2, [sp, #16]
cmp x2, x18
- ldp x29, x30, [sp], #112
+ ldp x29, x30, [sp], #32
b.ne 0f
ret
0:
@@ -59,18 +45,3 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x18, x2
b efi_handle_corrupted_x18 // tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)
-
-SYM_FUNC_START(__efi_rt_asm_recover)
- ldr_this_cpu x8, __efi_rt_asm_recover_sp, x9
- mov sp, x8
-
- ldp x0, x18, [sp, #16]
- ldp x19, x20, [sp, #32]
- ldp x21, x22, [sp, #48]
- ldp x23, x24, [sp, #64]
- ldp x25, x26, [sp, #80]
- ldp x27, x28, [sp, #96]
- ldp x29, x30, [sp], #112
-
- b efi_handle_runtime_exception
-SYM_FUNC_END(__efi_rt_asm_recover)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index ee53f2a0aa03..a908a37f0367 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -9,7 +9,6 @@
#include <linux/efi.h>
#include <linux/init.h>
-#include <linux/percpu.h>
#include <asm/efi.h>
@@ -145,28 +144,3 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
return s;
}
-
-asmlinkage DEFINE_PER_CPU(u64, __efi_rt_asm_recover_sp);
-
-asmlinkage efi_status_t __efi_rt_asm_recover(void);
-
-asmlinkage efi_status_t efi_handle_runtime_exception(const char *f)
-{
- pr_err(FW_BUG "Synchronous exception occurred in EFI runtime service %s()\n", f);
- clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- return EFI_ABORTED;
-}
-
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
-{
- /* Check whether the exception occurred while running the firmware */
- if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
- return false;
-
- pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
- add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dump_stack();
-
- regs->pc = (u64)__efi_rt_asm_recover;
- return true;
-}
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 795344ab4ec4..322a831f8ede 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -299,11 +299,11 @@ SYM_TYPED_FUNC_START(ftrace_stub)
ret
SYM_FUNC_END(ftrace_stub)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
ret
SYM_FUNC_END(ftrace_stub_graph)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void return_to_handler(void)
*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3e9cf9826417..5b391490e045 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,7 +30,6 @@
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
-#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
@@ -392,9 +391,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
msg = "paging request";
}
- if (efi_runtime_fixup_exception(regs, msg))
- return;
-
die_kernel_fault(msg, addr, esr, regs);
}
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index f4cb54d5afd6..01b57b726322 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -97,7 +97,7 @@ KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_LOONGARCH
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
- egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
endif
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index d06d4542b634..5332b1433f38 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -117,7 +117,7 @@ extern struct fwnode_handle *liointc_handle;
extern struct fwnode_handle *pch_lpc_handle;
extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev);
#include <asm-generic/irq.h>
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 946704bee599..79d5bfd913e0 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -349,13 +349,17 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ pte_val(pte) |= _PAGE_MODIFIED;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_DIRTY;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_DIRTY;
return pte;
}
@@ -455,7 +459,9 @@ static inline int pmd_write(pmd_t pmd)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
- pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ pmd_val(pmd) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _PAGE_MODIFIED)
+ pmd_val(pmd) |= _PAGE_DIRTY;
return pmd;
}
@@ -478,10 +484,13 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
- pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ pmd_val(pmd) |= _PAGE_MODIFIED;
+ if (pmd_val(pmd) & _PAGE_WRITE)
+ pmd_val(pmd) |= _PAGE_DIRTY;
return pmd;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_ACCESSED);
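
The pte/pmd hunks above all apply one rule: always record the software "modified" state, but only set the hardware dirty bit once the entry is writable, and when write permission is granted let a pending modified state promote to dirty. A self-contained sketch of that rule, using made-up flag bits rather than the real LoongArch page-table bits:

#include <stdio.h>

/* Made-up PTE flag bits, only to illustrate the rule. */
#define PG_WRITE	(1UL << 0)	/* hardware write permission   */
#define PG_DIRTY	(1UL << 1)	/* hardware dirty bit          */
#define PG_MODIFIED	(1UL << 2)	/* software "has been written" */

static unsigned long mkdirty(unsigned long pte)
{
	pte |= PG_MODIFIED;		/* always remembered in software */
	if (pte & PG_WRITE)
		pte |= PG_DIRTY;	/* hardware dirty only if writable */
	return pte;
}

static unsigned long mkwrite(unsigned long pte)
{
	pte |= PG_WRITE;
	if (pte & PG_MODIFIED)
		pte |= PG_DIRTY;	/* promote pending software-dirty state */
	return pte;
}

int main(void)
{
	unsigned long pte = 0;

	pte = mkdirty(pte);	/* read-only: modified, but not hardware-dirty */
	printf("after mkdirty: %#lx\n", pte);
	pte = mkwrite(pte);	/* now writable: the dirty bit catches up */
	printf("after mkwrite: %#lx\n", pte);
	return 0;
}
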
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 71189b28bfb2..3dd172d9ffea 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -19,21 +19,21 @@ extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-void loongson3_smp_setup(void);
-void loongson3_prepare_cpus(unsigned int max_cpus);
-void loongson3_boot_secondary(int cpu, struct task_struct *idle);
-void loongson3_init_secondary(void);
-void loongson3_smp_finish(void);
-void loongson3_send_ipi_single(int cpu, unsigned int action);
-void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+void loongson_smp_setup(void);
+void loongson_prepare_cpus(unsigned int max_cpus);
+void loongson_boot_secondary(int cpu, struct task_struct *idle);
+void loongson_init_secondary(void);
+void loongson_smp_finish(void);
+void loongson_send_ipi_single(int cpu, unsigned int action);
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action);
#ifdef CONFIG_HOTPLUG_CPU
-int loongson3_cpu_disable(void);
-void loongson3_cpu_die(unsigned int cpu);
+int loongson_cpu_disable(void);
+void loongson_cpu_die(unsigned int cpu);
#endif
static inline void plat_smp_setup(void)
{
- loongson3_smp_setup();
+ loongson_smp_setup();
}
static inline int raw_smp_processor_id(void)
@@ -85,28 +85,28 @@ extern void show_ipi_list(struct seq_file *p, int prec);
*/
static inline void smp_send_reschedule(int cpu)
{
- loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+ loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
static inline void arch_send_call_function_single_ipi(int cpu)
{
- loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+ loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+ loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
- return loongson3_cpu_disable();
+ return loongson_cpu_disable();
}
static inline void __cpu_die(unsigned int cpu)
{
- loongson3_cpu_die(cpu);
+ loongson_cpu_die(cpu);
}
extern void play_dead(void);
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 335398482038..8319cc409009 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -56,23 +56,6 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
return ioremap_cache(phys, size);
}
-void __init acpi_boot_table_init(void)
-{
- /*
- * If acpi_disabled, bail out
- */
- if (acpi_disabled)
- return;
-
- /*
- * Initialize the ACPI boot-time table parser.
- */
- if (acpi_table_init()) {
- disable_acpi();
- return;
- }
-}
-
#ifdef CONFIG_SMP
static int set_processor_mask(u32 id, u32 flags)
{
@@ -156,13 +139,21 @@ static void __init acpi_process_madt(void)
loongson_sysconf.nr_cpus = num_processors;
}
-int __init acpi_boot_init(void)
+void __init acpi_boot_table_init(void)
{
/*
* If acpi_disabled, bail out
*/
if (acpi_disabled)
- return -1;
+ return;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ if (acpi_table_init()) {
+ disable_acpi();
+ return;
+ }
loongson_sysconf.boot_cpu_id = read_csr_cpuid();
@@ -173,8 +164,6 @@ int __init acpi_boot_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
-
- return 0;
}
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index 1ba19c76563e..0524bf1169b7 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -117,7 +117,7 @@ void __init init_IRQ(void)
if (ipi_irq < 0)
panic("IPI IRQ mapping failed\n");
irq_set_percpu_devid(ipi_irq);
- r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
+ r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
if (r < 0)
panic("IPI IRQ request failed\n");
#endif
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 2526b68f1c0f..ddb8ba4eb399 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -152,7 +152,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->csr_crmd = p->thread.csr_crmd;
childregs->csr_prmd = p->thread.csr_prmd;
childregs->csr_ecfg = p->thread.csr_ecfg;
- return 0;
+ goto out;
}
/* user thread */
@@ -171,14 +171,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
*/
childregs->csr_euen = 0;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->regs[2] = tls;
+
+out:
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDSIMD);
clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
- if (clone_flags & CLONE_SETTLS)
- childregs->regs[2] = tls;
-
return 0;
}
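
The copy_thread() hunks above replace an early "return 0" in the kernel-thread branch with a jump to a shared "out:" label, so the flag clearing at the end runs for kernel and user threads alike, and the CLONE_SETTLS assignment moves ahead of that common tail. A stripped-down sketch of the same control-flow change, with invented names standing in for the real thread state:

#include <stdio.h>

#define TIF_USEDFPU	(1u << 0)	/* invented flag bit for the sketch */

static int setup_child(int kernel_thread, int set_tls, unsigned long tls,
		       unsigned long *tls_reg, unsigned int *flags)
{
	if (kernel_thread)
		goto out;		/* previously: return 0 (skipped the tail) */

	if (set_tls)
		*tls_reg = tls;		/* user threads get their TLS pointer */
out:
	*flags &= ~TIF_USEDFPU;		/* common cleanup now runs on both paths */
	return 0;
}

int main(void)
{
	unsigned long tls_reg = 0;
	unsigned int flags = TIF_USEDFPU;

	setup_child(1, 0, 0, &tls_reg, &flags);
	printf("flags after kernel-thread path: %#x\n", flags);
	return 0;
}
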
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 1eb63fa9bc81..ae436def7ee9 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -257,7 +257,6 @@ void __init platform_init(void)
#ifdef CONFIG_ACPI
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
- acpi_boot_init();
#endif
#ifdef CONFIG_NUMA
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 781a4d4bdddc..6ed72f7ff278 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -136,12 +136,12 @@ static void ipi_write_action(int cpu, u32 action)
}
}
-void loongson3_send_ipi_single(int cpu, unsigned int action)
+void loongson_send_ipi_single(int cpu, unsigned int action)
{
ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
-void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
@@ -149,7 +149,7 @@ void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ipi_write_action(cpu_logical_map(i), (u32)action);
}
-irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
+irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
unsigned int cpu = smp_processor_id();
@@ -169,7 +169,7 @@ irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-void __init loongson3_smp_setup(void)
+void __init loongson_smp_setup(void)
{
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
@@ -178,7 +178,7 @@ void __init loongson3_smp_setup(void)
pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}
-void __init loongson3_prepare_cpus(unsigned int max_cpus)
+void __init loongson_prepare_cpus(unsigned int max_cpus)
{
int i = 0;
@@ -193,7 +193,7 @@ void __init loongson3_prepare_cpus(unsigned int max_cpus)
/*
* Setup the PC, SP, and TP of a secondary processor and start it running!
*/
-void loongson3_boot_secondary(int cpu, struct task_struct *idle)
+void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long entry;
@@ -205,13 +205,13 @@ void loongson3_boot_secondary(int cpu, struct task_struct *idle)
csr_mail_send(entry, cpu_logical_map(cpu), 0);
- loongson3_send_ipi_single(cpu, SMP_BOOT_CPU);
+ loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}
/*
* SMP init and finish on secondary CPUs
*/
-void loongson3_init_secondary(void)
+void loongson_init_secondary(void)
{
unsigned int cpu = smp_processor_id();
unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
@@ -231,7 +231,7 @@ void loongson3_init_secondary(void)
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
}
-void loongson3_smp_finish(void)
+void loongson_smp_finish(void)
{
local_irq_enable();
iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
@@ -240,7 +240,7 @@ void loongson3_smp_finish(void)
#ifdef CONFIG_HOTPLUG_CPU
-int loongson3_cpu_disable(void)
+int loongson_cpu_disable(void)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
@@ -262,7 +262,7 @@ int loongson3_cpu_disable(void)
return 0;
}
-void loongson3_cpu_die(unsigned int cpu)
+void loongson_cpu_die(unsigned int cpu)
{
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu_relax();
@@ -300,19 +300,19 @@ void play_dead(void)
*/
#ifdef CONFIG_PM
-static int loongson3_ipi_suspend(void)
+static int loongson_ipi_suspend(void)
{
return 0;
}
-static void loongson3_ipi_resume(void)
+static void loongson_ipi_resume(void)
{
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}
-static struct syscore_ops loongson3_ipi_syscore_ops = {
- .resume = loongson3_ipi_resume,
- .suspend = loongson3_ipi_suspend,
+static struct syscore_ops loongson_ipi_syscore_ops = {
+ .resume = loongson_ipi_resume,
+ .suspend = loongson_ipi_suspend,
};
/*
@@ -321,7 +321,7 @@ static struct syscore_ops loongson3_ipi_syscore_ops = {
*/
static int __init ipi_pm_init(void)
{
- register_syscore_ops(&loongson3_ipi_syscore_ops);
+ register_syscore_ops(&loongson_ipi_syscore_ops);
return 0;
}
@@ -425,7 +425,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
- loongson3_prepare_cpus(max_cpus);
+ loongson_prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
calculate_cpu_foreign_map();
@@ -436,7 +436,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- loongson3_boot_secondary(cpu, tidle);
+ loongson_boot_secondary(cpu, tidle);
/* Wait for CPU to start and be ready to sync counters */
if (!wait_for_completion_timeout(&cpu_starting,
@@ -465,7 +465,7 @@ asmlinkage void start_secondary(void)
cpu_probe();
constant_clockevent_init();
- loongson3_init_secondary();
+ loongson_init_secondary();
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
@@ -487,11 +487,11 @@ asmlinkage void start_secondary(void)
complete(&cpu_running);
/*
- * irq will be enabled in loongson3_smp_finish(), enabling it too
+ * irq will be enabled in loongson_smp_finish(), enabling it too
* early is dangerous.
*/
WARN_ON_ONCE(!irqs_disabled());
- loongson3_smp_finish();
+ loongson_smp_finish();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
index b206d9159205..4571c3c87cd4 100644
--- a/arch/loongarch/kernel/unwind_prologue.c
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -43,7 +43,8 @@ static bool unwind_by_prologue(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
union loongarch_instruction *ip, *ip_end;
- unsigned long frame_size = 0, frame_ra = -1;
+ long frame_ra = -1;
+ unsigned long frame_size = 0;
unsigned long size, offset, pc = state->pc;
if (state->sp >= info->end || state->sp < info->begin)
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 3f8a86c4336a..02e6be9c5b0d 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -67,12 +67,12 @@ linux.bin.ub linux.bin.gz: linux.bin
linux.bin: vmlinux
linux.bin linux.bin.gz linux.bin.ub:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
- @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
PHONY += simpleImage.$(DTB)
simpleImage.$(DTB): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip)
- @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
define archhelp
echo '* linux.bin - Create raw binary'
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 6caec386ad2f..4678627673df 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -622,6 +622,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pmd;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_ACCESSED);
diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile
index 8c3ad76602f3..29c11a06b750 100644
--- a/arch/nios2/boot/Makefile
+++ b/arch/nios2/boot/Makefile
@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz
+$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
$(call if_changed,uimage)
@$(kecho) 'Kernel: $@ is ready'
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 4745bb9998bd..6d8492b6e2b8 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -602,6 +602,7 @@ ____##func(struct pt_regs *regs)
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 7786e3ac7611..8c3862b4c259 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -142,7 +142,7 @@ SECTIONS
#endif
.data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
- *(.data.rel.ro*)
+ *(.data.rel.ro .data.rel.ro.*)
}
.branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 43f1c76d48ce..a379b0ce19ff 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -113,23 +113,19 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
- /* First arg comes in as a 32 bits pointer. */
- EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
- EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
+ /* Initialize tail_call_cnt, to be skipped if we do tail calls. */
+ EMIT(PPC_RAW_LI(_R4, 0));
+
+#define BPF_TAILCALL_PROLOGUE_SIZE 4
+
EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
- /*
- * Initialize tail_call_cnt in stack frame if we do tail calls.
- * Otherwise, put in NOPs so that it can be skipped when we are
- * invoked through a tail call.
- */
if (ctx->seen & SEEN_TAILCALL)
- EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_1) - 1, _R1,
- bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
- else
- EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
-#define BPF_TAILCALL_PROLOGUE_SIZE 16
+ /* First arg comes in as a 32 bits pointer. */
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
+ EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
/*
* We need a stack frame, but we don't necessarily need to
@@ -170,24 +166,24 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
-}
-
-void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
-{
- EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
-
- bpf_jit_emit_common_epilogue(image, ctx);
-
- /* Tear down our stack frame */
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ /* Tear down our stack frame */
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_MTLR(_R0));
+}
+
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
+
+ bpf_jit_emit_common_epilogue(image, ctx);
+
EMIT(PPC_RAW_BLR());
}
@@ -244,7 +240,6 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
- EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
/*
* if (prog == NULL)
@@ -255,19 +250,14 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
/* goto *(prog->bpf_func + prologue_size); */
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
-
- if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
-
EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
-
- if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MTLR(_R0));
-
EMIT(PPC_RAW_MTCTR(_R3));
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
+ /* Put tail_call_cnt in r4 */
+ EMIT(PPC_RAW_MR(_R4, _R0));
+
/* tear down stack, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fa78595a6089..593cf09264d8 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -317,9 +317,9 @@ config SMP
config NR_CPUS
int "Maximum number of CPUs (2-512)"
depends on SMP
- range 2 512 if !SBI_V01
- range 2 32 if SBI_V01 && 32BIT
- range 2 64 if SBI_V01 && 64BIT
+ range 2 512 if !RISCV_SBI_V01
+ range 2 32 if RISCV_SBI_V01 && 32BIT
+ range 2 64 if RISCV_SBI_V01 && 64BIT
default "32" if 32BIT
default "64" if 64BIT
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 1b471ff73178..816e753de636 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -23,6 +23,7 @@
#define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 6cbc7437d886..47d3ab0fcc36 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -10,6 +10,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
#ifdef CONFIG_EFI
extern void efi_init(void);
@@ -20,7 +21,10 @@ extern void efi_init(void);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
-#define arch_efi_call_virt_setup() efi_virtmap_load()
+#define arch_efi_call_virt_setup() ({ \
+ sync_kernel_mappings(efi_mm.pgd); \
+ efi_virtmap_load(); \
+ })
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 947f23d7b6af..59dc12b5b7e8 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */
- memcpy(pgd + USER_PTRS_PER_PGD,
- init_mm.pgd + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ sync_kernel_mappings(pgd);
}
return pgd;
}
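
The pgalloc.h hunk above factors the kernel-half copy of the top-level page table into sync_kernel_mappings(), so the EFI runtime path can reuse it before switching to efi_mm. A rough userspace sketch of what that copy amounts to; the array sizes and names below are made up for illustration:

#include <string.h>
#include <stdio.h>

/* Stand-in sizes; the real values depend on the paging mode. */
#define PTRS_PER_PGD      512
#define USER_PTRS_PER_PGD 256

typedef unsigned long pgd_t;

static pgd_t init_pgd[PTRS_PER_PGD];	/* stands in for init_mm.pgd */

/* Copy the kernel half of the reference top-level table into pgd. */
static void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

int main(void)
{
	pgd_t efi_pgd[PTRS_PER_PGD] = { 0 };

	init_pgd[USER_PTRS_PER_PGD] = 0xdeadbeef;	/* pretend kernel mapping */
	sync_kernel_mappings(efi_pgd);			/* what the EFI path now does */
	printf("%lx\n", efi_pgd[USER_PTRS_PER_PGD]);
	return 0;
}
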
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ec936910a96..92ec2d9d7273 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -600,6 +600,7 @@ static inline int pmd_dirty(pmd_t pmd)
return pte_dirty(pmd_pte(pmd));
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return pte_young(pmd_pte(pmd));
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index d3443be7eedc..3831b638ecab 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -50,6 +50,9 @@ void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
/* Clear IPI for current CPU */
void riscv_clear_ipi(void);
+/* Check other CPUs stop or not */
+bool smp_crash_stop_failed(void);
+
/* Secondary hart entry */
asmlinkage void smp_callin(void);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index b9eda3fcbd6d..186abd146eaf 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -404,6 +404,19 @@ handle_syscall_trace_exit:
#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
+ /*
+ * Takes the pseudo-spinlock for the shadow stack, in case multiple
+ * harts are concurrently overflowing their kernel stacks. We could
+ * store any value here, but since we're overflowing the kernel stack
+ * already we only have SP to use as a scratch register. So we just
+ * swap in the address of the spinlock, as that's definitely non-zero.
+ *
+ * Pairs with a store_release in handle_bad_stack().
+ */
+1: la sp, spin_shadow_stack
+ REG_AMOSWAP_AQ sp, sp, (sp)
+ bnez sp, 1b
+
la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index ee79e6839b86..2d139b724bc8 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -15,6 +15,8 @@
#include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */
#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
/*
* kexec_image_info - Print received image details
@@ -138,20 +140,35 @@ void machine_shutdown(void)
#endif
}
-/* Override the weak function in kernel/panic.c */
-void crash_smp_send_stop(void)
+static void machine_kexec_mask_interrupts(void)
{
- static int cpus_stopped;
+ unsigned int i;
+ struct irq_desc *desc;
- /*
- * This function can be called twice in panic path, but obviously
- * we execute this only once.
- */
- if (cpus_stopped)
- return;
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+ int ret;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ /*
+ * First try to remove the active state. If this
+ * fails, try to EOI the interrupt.
+ */
+ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+ if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+ chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
- smp_send_stop();
- cpus_stopped = 1;
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
}
/*
@@ -169,6 +186,8 @@ machine_crash_shutdown(struct pt_regs *regs)
crash_smp_send_stop();
crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
+
pr_info("Starting crashdump kernel...\n");
}
@@ -195,6 +214,11 @@ machine_kexec(struct kimage *image)
void *control_code_buffer = page_address(image->control_code_page);
riscv_kexec_method kexec_method = NULL;
+#ifdef CONFIG_SMP
+ WARN(smp_crash_stop_failed(),
+ "Some CPUs may be stale, kdump will be unreliable.\n");
+#endif
+
if (image->type != KEXEC_TYPE_CRASH)
kexec_method = control_code_buffer;
else
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 67ec1fadcfe2..86acd690d529 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -322,10 +322,11 @@ subsys_initcall(topology_init);
void free_initmem(void)
{
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
- set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
- IS_ENABLED(CONFIG_64BIT) ?
- set_memory_rw : set_memory_rw_nx);
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
+ set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_kernel_memory(__init_begin, __init_end, set_memory_nx);
+ }
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 760a64518c58..8c3b59f1f9b8 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -12,6 +12,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
@@ -22,11 +23,13 @@
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CPU_STOP,
+ IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK,
IPI_TIMER,
IPI_MAX
@@ -71,6 +74,32 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+ crash_save_cpu(regs, cpu);
+
+ atomic_dec(&waiting_for_crash_ipi);
+
+ local_irq_disable();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_has_hotplug(cpu))
+ cpu_ops[cpu]->cpu_stop();
+#endif
+
+ for(;;)
+ wait_for_interrupt();
+}
+#else
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+ unreachable();
+}
+#endif
+
static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
@@ -124,8 +153,9 @@ void arch_irq_work_raise(void)
void handle_IPI(struct pt_regs *regs)
{
- unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
- unsigned long *stats = ipi_data[smp_processor_id()].stats;
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ipis = &ipi_data[cpu].bits;
+ unsigned long *stats = ipi_data[cpu].stats;
riscv_clear_ipi();
@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs)
ipi_stop();
}
+ if (ops & (1 << IPI_CPU_CRASH_STOP)) {
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
+ }
+
if (ops & (1 << IPI_IRQ_WORK)) {
stats[IPI_IRQ_WORK]++;
irq_work_run();
@@ -176,6 +210,7 @@ static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
};
@@ -235,6 +270,64 @@ void smp_send_stop(void)
cpumask_pr_args(cpu_online_mask));
}
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+ unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+ return num_online_cpus() - this_cpu_online;
+}
+
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+ cpumask_t mask;
+ unsigned long timeout;
+
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ cpus_stopped = 1;
+
+ /*
+ * If this cpu is the only one alive at this point in time, online or
+ * not, there are no stop messages to be sent around, so just back out.
+ */
+ if (num_other_online_cpus() == 0)
+ return;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
+
+ pr_crit("SMP: stopping secondary CPUs\n");
+ send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+ udelay(1);
+
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(&mask));
+}
+
+bool smp_crash_stop_failed(void)
+{
+ return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
void smp_send_reschedule(int cpu)
{
send_ipi_single(cpu, IPI_RESCHEDULE);
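
The new crash_smp_send_stop() above follows the same shape as the arm64 version: set an atomic count of the other online CPUs, send the crash IPI, then busy-wait up to one second for the count to drain, with smp_crash_stop_failed() later reporting any stragglers to machine_kexec(). A compressed userspace sketch of that count-down-and-wait idea (C11 atomics, single-threaded stand-in, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int waiting = 0;

static void secondary_ack(void)		/* runs on each other CPU in the real code */
{
	atomic_fetch_sub(&waiting, 1);
}

static int wait_for_secondaries(int nr_others)
{
	long timeout_us = 1000000;		/* one second, as in the patch */

	atomic_store(&waiting, nr_others);
	/* ... the crash IPI would be sent to the other CPUs here ... */
	secondary_ack();			/* simulate one CPU acknowledging */

	while (atomic_load(&waiting) > 0 && timeout_us--)
		usleep(1);

	return atomic_load(&waiting);		/* >0 means some CPUs never stopped */
}

int main(void)
{
	printf("stragglers: %d\n", wait_for_secondaries(1));
	return 0;
}
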
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index f3e96d60a2ff..7abd8e4c4df6 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -221,11 +221,29 @@ asmlinkage unsigned long get_overflow_stack(void)
OVERFLOW_STACK_SIZE;
}
+/*
+ * A pseudo spinlock to protect the shadow stack from being used by multiple
+ * harts concurrently. This isn't a real spinlock because the lock side must
+ * be taken without a valid stack and with only a single register; it's only
+ * taken while in the process of panicking anyway, so the performance and
+ * error checking a proper spinlock gives us don't matter.
+ */
+unsigned long spin_shadow_stack;
+
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+ /*
+ * We're done with the shadow stack by this point, as we're on the
+ * overflow stack. Tell any other concurrent overflowing harts that
+ * they can proceed with panicking by releasing the pseudo-spinlock.
+ *
+ * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+ */
+ smp_store_release(&spin_shadow_stack, 0);
+
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");
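
Together, the entry.S and traps.c hunks above implement a one-word pseudo-spinlock around the shadow stack: the overflow path acquires it with an atomic swap (amoswap.aq), and handle_bad_stack() releases it with a store-release once it is running on the per-CPU overflow stack. A loose C11 analogue of that acquire/release pairing (userspace sketch, hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong spin_shadow_stack = 0;

static void take_shadow_stack(void)
{
	/* Spin until the previous value was 0, i.e. nobody held the lock. */
	while (atomic_exchange_explicit(&spin_shadow_stack, 1,
					memory_order_acquire) != 0)
		;
}

static void release_shadow_stack(void)
{
	/* Pairs with the acquire exchange above. */
	atomic_store_explicit(&spin_shadow_stack, 0, memory_order_release);
}

int main(void)
{
	take_shadow_stack();
	printf("shadow stack in use\n");
	release_shadow_stack();
	return 0;
}
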
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index db6548509bb3..06e6b27f3bcc 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -17,6 +17,7 @@ vdso-syms += flush_icache
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
ccflags-y := -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index f1cb9391190d..11e901286414 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -763,6 +763,7 @@ static inline int pmd_dirty(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 87be3e855bf7..c907f747d2a0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -199,7 +199,16 @@ unsigned long __get_wchan(struct task_struct *p);
/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-register unsigned long current_stack_pointer asm("r15");
+/* avoid using global register due to gcc bug in versions < 8.4 */
+#define current_stack_pointer (__current_stack_pointer())
+
+static __always_inline unsigned long __current_stack_pointer(void)
+{
+ unsigned long sp;
+
+ asm volatile("lgr %0,15" : "=d" (sp));
+ return sp;
+}
static __always_inline unsigned short stap(void)
{
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index dd74fe664ed1..e4ef67e4da0a 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -46,7 +46,7 @@ struct save_area {
u64 fprs[16];
u32 fpc;
u32 prefix;
- u64 todpreg;
+ u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index a779418ceba9..3bc9736bddb1 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -693,6 +693,7 @@ static inline unsigned long pmd_dirty(pmd_t pmd)
return pte_dirty(pte);
}
+#define pmd_young pmd_young
static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 9860ca5979f8..9e38ffaadb5d 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -83,7 +83,7 @@ cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
- @$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
+ @$(kecho) 'Kernel: $@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 8b70237c33f7..d6f3703e4119 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -861,8 +861,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
pmu_enabled = cpuc->enabled;
cpuc->enabled = 0;
- /* stop everything (includes BRS) */
- amd_pmu_disable_all();
+ amd_brs_disable_all();
/* Drain BRS is in use (could be inactive) */
if (cpuc->lbr_users)
@@ -873,7 +872,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
cpuc->enabled = pmu_enabled;
if (pmu_enabled)
- amd_pmu_enable_all(0);
+ amd_brs_enable_all();
return amd_pmu_adjust_nmi_window(handled);
}
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index d568afc705d2..83f15fe411b3 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -553,6 +553,7 @@ static void uncore_clean_online(void)
hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
hlist_del(&uncore->node);
+ kfree(uncore->events);
kfree(uncore);
}
}
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 82ef87e9a897..42a55794004a 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1263,6 +1263,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
if (1 << order != nr_pages)
goto out;
+ /*
+ * Some processors cannot always support single range for more than
+ * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
+ * also be affected, so for now rather than trying to keep track of
+ * which ones, just disable it for all.
+ */
+ if (nr_pages > 1)
+ goto out;
+
buf->single = true;
buf->nr_pages = nr_pages;
ret = 0;
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index f49bc3ec76e6..a269049a43ce 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -77,7 +77,7 @@ static int hyperv_init_ghcb(void)
static int hv_cpu_init(unsigned int cpu)
{
union hv_vp_assist_msr_contents msr = { 0 };
- struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
+ struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu];
int ret;
ret = hv_common_cpu_init(cpu);
@@ -87,34 +87,32 @@ static int hv_cpu_init(unsigned int cpu)
if (!hv_vp_assist_page)
return 0;
- if (!*hvp) {
- if (hv_root_partition) {
- /*
- * For root partition we get the hypervisor provided VP assist
- * page, instead of allocating a new page.
- */
- rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
- *hvp = memremap(msr.pfn <<
- HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
- PAGE_SIZE, MEMREMAP_WB);
- } else {
- /*
- * The VP assist page is an "overlay" page (see Hyper-V TLFS's
- * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
- * out to make sure we always write the EOI MSR in
- * hv_apic_eoi_write() *after* the EOI optimization is disabled
- * in hv_cpu_die(), otherwise a CPU may not be stopped in the
- * case of CPU offlining and the VM will hang.
- */
+ if (hv_root_partition) {
+ /*
+ * For root partition we get the hypervisor provided VP assist
+ * page, instead of allocating a new page.
+ */
+ rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+ *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+ } else {
+ /*
+ * The VP assist page is an "overlay" page (see Hyper-V TLFS's
+ * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
+ * out to make sure we always write the EOI MSR in
+ * hv_apic_eoi_write() *after* the EOI optimization is disabled
+ * in hv_cpu_die(), otherwise a CPU may not be stopped in the
+ * case of CPU offlining and the VM will hang.
+ */
+ if (!*hvp)
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
- if (*hvp)
- msr.pfn = vmalloc_to_pfn(*hvp);
- }
- WARN_ON(!(*hvp));
- if (*hvp) {
- msr.enable = 1;
- wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
- }
+ if (*hvp)
+ msr.pfn = vmalloc_to_pfn(*hvp);
+
+ }
+ if (!WARN_ON(!(*hvp))) {
+ msr.enable = 1;
+ wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
}
return hyperv_init_ghcb();
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index b71f4f2ecdd5..b2da7cb64b31 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -305,6 +305,9 @@
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 10ac52705892..4a2af82553e4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -535,6 +535,11 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+
+#define MSR_AMD64_DE_CFG 0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -640,9 +645,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c936ce9f0c47..dfdb103ae4f6 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -321,7 +321,7 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
-extern void write_spec_ctrl_current(u64 val, bool force);
+extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5059799bebe3..286a71810f9e 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -139,6 +139,7 @@ static inline int pmd_dirty(pmd_t pmd)
return pmd_flags(pmd) & _PAGE_DIRTY;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_ACCESSED;
@@ -1438,6 +1439,14 @@ static inline bool arch_has_hw_pte_young(void)
return true;
}
+#ifdef CONFIG_XEN_PV
+#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return !cpu_feature_enabled(X86_FEATURE_XENPV);
+}
+#endif
+
#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 60ece592b220..dbb38a6b4dfb 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -37,7 +37,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
* rsi = lockval (second argument)
* rdx = internal variable (set to 0)
*/
-asm (".pushsection .spinlock.text;"
+asm (".pushsection .spinlock.text, \"ax\";"
".globl " PV_UNLOCK ";"
".type " PV_UNLOCK ", @function;"
".align 4,0x90;"
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 860b60273df3..c75d75b9f11a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -770,8 +770,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
-#define MSR_AMD64_DE_CFG 0xC0011029
-
static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
@@ -965,8 +963,8 @@ static void init_amd(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
- msr_set_bit(MSR_F10H_DECFG,
- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+ msr_set_bit(MSR_AMD64_DE_CFG,
+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 3e3230cccaa7..6daf84229548 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -60,11 +60,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
static DEFINE_MUTEX(spec_ctrl_mutex);
+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+ this_cpu_write(x86_spec_ctrl_current, val);
+ wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
/*
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
*/
-void write_spec_ctrl_current(u64 val, bool force)
+void update_spec_ctrl_cond(u64 val)
{
if (this_cpu_read(x86_spec_ctrl_current) == val)
return;
@@ -75,7 +82,7 @@ void write_spec_ctrl_current(u64 val, bool force)
* When KERNEL_IBRS this MSR is written on return-to-user, unless
* forced the update can be delayed until that time.
*/
- if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
@@ -1328,7 +1335,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
if (ia32_cap & ARCH_CAP_RRSBA) {
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ update_spec_ctrl(x86_spec_ctrl_base);
}
}
@@ -1450,7 +1457,7 @@ static void __init spectre_v2_select_mitigation(void)
if (spectre_v2_in_ibrs_mode(mode)) {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ update_spec_ctrl(x86_spec_ctrl_base);
}
switch (mode) {
@@ -1564,7 +1571,7 @@ static void __init spectre_v2_select_mitigation(void)
static void update_stibp_msr(void * __unused)
{
u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
- write_spec_ctrl_current(val, true);
+ update_spec_ctrl(val);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1797,7 +1804,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable();
} else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ update_spec_ctrl(x86_spec_ctrl_base);
}
}
@@ -2048,7 +2055,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void)
{
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ update_spec_ctrl(x86_spec_ctrl_base);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable();
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 21fd425088fe..c393b8773ace 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -326,8 +326,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
- msr_set_bit(MSR_F10H_DECFG,
- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+ msr_set_bit(MSR_AMD64_DE_CFG,
+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index ebe79d60619f..da8b8ea6b063 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -356,6 +356,9 @@ static int sgx_validate_offset_length(struct sgx_encl *encl,
if (!length || !IS_ALIGNED(length, PAGE_SIZE))
return -EINVAL;
+ if (offset + length < offset)
+ return -EINVAL;
+
if (offset + length - PAGE_SIZE >= encl->size)
return -EINVAL;
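
The sgx_validate_offset_length() hunk above adds a classic unsigned wrap-around guard: for unsigned operands, offset + length < offset holds exactly when the addition overflowed, so such ranges are rejected before the later size comparison can be fooled. A tiny sketch of the same check (userspace C, made-up bounds):

#include <stdio.h>
#include <limits.h>

static int range_ok(unsigned long offset, unsigned long length,
		    unsigned long size)
{
	if (offset + length < offset)	/* the addition wrapped around */
		return 0;
	return offset + length <= size;
}

int main(void)
{
	printf("%d\n", range_ok(16, 32, 4096));			/* 1: fine */
	printf("%d\n", range_ok(ULONG_MAX - 8, 64, 4096));	/* 0: would wrap */
	return 0;
}
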
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index ec7bbac3a9f2..8009c8346d8f 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -58,24 +58,6 @@ static void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
-static bool tsx_ctrl_is_supported(void)
-{
- u64 ia32_cap = x86_read_arch_cap_msr();
-
- /*
- * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
- * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
- *
- * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
- * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
- * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
- * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
- * tsx= cmdline requests will do nothing on CPUs without
- * MSR_IA32_TSX_CTRL support.
- */
- return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
-}
-
static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
{
if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void)
rdmsrl(MSR_TSX_FORCE_ABORT, msr);
msr |= MSR_TFA_TSX_CPUID_CLEAR;
wrmsrl(MSR_TSX_FORCE_ABORT, msr);
- } else if (tsx_ctrl_is_supported()) {
+ } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
rdmsrl(MSR_IA32_TSX_CTRL, msr);
msr |= TSX_CTRL_CPUID_CLEAR;
wrmsrl(MSR_IA32_TSX_CTRL, msr);
@@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void)
u64 mcu_opt_ctrl;
/* Check if RTM_ALLOW exists */
- if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+ if (!boot_cpu_has_bug(X86_BUG_TAA) ||
+ !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) ||
!cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
return;
@@ -191,7 +174,20 @@ void __init tsx_init(void)
return;
}
- if (!tsx_ctrl_is_supported()) {
+ /*
+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+ *
+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+ * tsx= cmdline requests will do nothing on CPUs without
+ * MSR_IA32_TSX_CTRL support.
+ */
+ if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) {
+ setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+ } else {
tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
return;
}
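
The tsx.c hunk above drops the per-call tsx_ctrl_is_supported() helper in favour of reading MSR_IA32_ARCH_CAPABILITIES once in tsx_init() and caching the answer as the synthetic X86_FEATURE_MSR_TSX_CTRL flag, which the other call sites then test. A rough stand-in for that "probe once, test a cached flag" shape (userspace C; the capability bit and read_capabilities() are invented):

#include <stdbool.h>
#include <stdio.h>

#define CAP_TSX_CTRL (1u << 7)		/* invented placeholder bit */

static bool have_tsx_ctrl;		/* the cached "feature flag" */

static unsigned int read_capabilities(void)
{
	return CAP_TSX_CTRL;		/* pretend the hardware reports the bit */
}

static void tsx_init(void)
{
	if (read_capabilities() & CAP_TSX_CTRL)
		have_tsx_ctrl = true;	/* set once, tested everywhere else */
}

int main(void)
{
	tsx_init();
	printf("TSX control %savailable\n", have_tsx_ctrl ? "" : "not ");
	return 0;
}
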
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 3b28c5b25e12..d00db56a8868 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -605,9 +605,9 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
if (test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
save_fpregs_to_fpstate(dst_fpu);
+ fpregs_unlock();
if (!(clone_flags & CLONE_THREAD))
fpu_inherit_perms(dst_fpu);
- fpregs_unlock();
/*
* Children never inherit PASID state.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c21b7347a26d..e436c9c1ef3b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
}
if (updmsr)
- write_spec_ctrl_current(msr, false);
+ update_spec_ctrl_cond(msr);
}
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1ccb769f62af..b6f96d47e596 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2443,6 +2443,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
{
bool list_unstable, zapped_root = false;
+ lockdep_assert_held_write(&kvm->mmu_lock);
trace_kvm_mmu_prepare_zap_page(sp);
++kvm->stat.mmu_shadow_zapped;
*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
@@ -4262,14 +4263,14 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
- r = make_mmu_pages_available(vcpu);
- if (r)
- goto out_unlock;
-
- if (is_tdp_mmu_fault)
+ if (is_tdp_mmu_fault) {
r = kvm_tdp_mmu_map(vcpu, fault);
- else
+ } else {
+ r = make_mmu_pages_available(vcpu);
+ if (r)
+ goto out_unlock;
r = __direct_map(vcpu, fault);
+ }
out_unlock:
if (is_tdp_mmu_fault)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 4c620999d230..995bc0f90759 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1091,6 +1091,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
+ return;
+
+ kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}
@@ -1125,6 +1131,9 @@ void svm_free_nested(struct vcpu_svm *svm)
if (!svm->nested.initialized)
return;
+ if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
+ svm_switch_vmcb(svm, &svm->vmcb01);
+
svm_vcpu_free_msrpm(svm->nested.msrpm);
svm->nested.msrpm = NULL;
@@ -1143,9 +1152,6 @@ void svm_free_nested(struct vcpu_svm *svm)
svm->nested.initialized = false;
}
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9f88c8e6766e..ce362e88a567 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -346,12 +346,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
return 0;
}
-static int is_external_interrupt(u32 info)
-{
- info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
- return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
-}
-
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1438,6 +1432,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
*/
svm_clear_current_vmcb(svm->vmcb);
+ svm_leave_nested(vcpu);
svm_free_nested(svm);
sev_free_vcpu(vcpu);
@@ -2709,9 +2704,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data = 0;
switch (msr->index) {
- case MSR_F10H_DECFG:
- if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
- msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+ case MSR_AMD64_DE_CFG:
+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+ msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
case MSR_IA32_PERF_CAPABILITIES:
return 0;
@@ -2812,7 +2807,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0x1E;
}
break;
- case MSR_F10H_DECFG:
+ case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
default:
@@ -3041,7 +3036,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
- case MSR_F10H_DECFG: {
+ case MSR_AMD64_DE_CFG: {
struct kvm_msr_entry msr_entry;
msr_entry.index = msr->index;
@@ -3425,15 +3420,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return 0;
}
- if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
- exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
- exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
- exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
- printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
- "exit_code 0x%x\n",
- __func__, svm->vmcb->control.exit_int_info,
- exit_code);
-
if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0c62352dda6a..5b0d4859e4b7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4854,6 +4854,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
{
+ kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
}
@@ -6440,9 +6441,6 @@ out:
return kvm_state.size;
}
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ecea83f0da49..2835bd796639 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,6 +628,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
ex->payload = payload;
}
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool has_payload, unsigned long payload, bool reinject)
@@ -1557,7 +1563,7 @@ static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,
- MSR_F10H_DECFG,
+ MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_PERF_CAPABILITIES,
@@ -5195,7 +5201,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
- kvm_x86_ops.nested_ops->leave_nested(vcpu);
+ kvm_leave_nested(vcpu);
kvm_smm_changed(vcpu, events->smi.smm);
}
@@ -9805,7 +9811,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{
- if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+ if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
kvm_x86_ops.nested_ops->triple_fault(vcpu);
return 1;
}
@@ -10560,15 +10566,16 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 0;
goto out;
}
- if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
- if (is_guest_mode(vcpu)) {
+ if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+ if (is_guest_mode(vcpu))
kvm_x86_ops.nested_ops->triple_fault(vcpu);
- } else {
+
+ if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu->mmio_needed = 0;
r = 0;
- goto out;
}
+ goto out;
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
@@ -11997,8 +12004,18 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
WARN_ON_ONCE(!init_event &&
(old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
+ /*
+ * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
+ * possible to INIT the vCPU while L2 is active. Force the vCPU back
+ * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
+ * bits), i.e. virtualization is disabled.
+ */
+ if (is_guest_mode(vcpu))
+ kvm_leave_nested(vcpu);
+
kvm_lapic_reset(vcpu, init_event);
+ WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
vcpu->arch.hflags = 0;
vcpu->arch.smi_pending = 0;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 2dae413bd62a..f3098c0e386a 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -954,6 +954,14 @@ static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}
+static inline int max_evtchn_port(struct kvm *kvm)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
+ return EVTCHN_2L_NR_CHANNELS;
+ else
+ return COMPAT_EVTCHN_2L_NR_CHANNELS;
+}
+
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
evtchn_port_t *ports)
{
@@ -1042,6 +1050,10 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
*r = -EFAULT;
goto out;
}
+ if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
+ *r = -EINVAL;
+ goto out;
+ }
}
if (sched_poll.nr_ports == 1)
@@ -1215,6 +1227,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
bool longmode;
u64 input, params[6], r = -ENOSYS;
bool handled = false;
+ u8 cpl;
input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -1242,9 +1255,17 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
params[5] = (u64)kvm_r9_read(vcpu);
}
#endif
+ cpl = static_call(kvm_x86_get_cpl)(vcpu);
trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
params[3], params[4], params[5]);
+ /*
+ * Only allow hypercall acceleration for CPL0. The rare hypercalls that
+ * are permitted in guest userspace can be handled by the VMM.
+ */
+ if (unlikely(cpl > 0))
+ goto handle_in_userspace;
+
switch (input) {
case __HYPERVISOR_xen_version:
if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
@@ -1279,10 +1300,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
if (handled)
return kvm_xen_hypercall_set_result(vcpu, r);
+handle_in_userspace:
vcpu->run->exit_reason = KVM_EXIT_XEN;
vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
vcpu->run->xen.u.hcall.longmode = longmode;
- vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
+ vcpu->run->xen.u.hcall.cpl = cpl;
vcpu->run->xen.u.hcall.input = input;
vcpu->run->xen.u.hcall.params[0] = params[0];
vcpu->run->xen.u.hcall.params[1] = params[1];
@@ -1297,14 +1319,6 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
return 0;
}
-static inline int max_evtchn_port(struct kvm *kvm)
-{
- if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
- return EVTCHN_2L_NR_CHANNELS;
- else
- return COMPAT_EVTCHN_2L_NR_CHANNELS;
-}
-
static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
int poll_evtchn = vcpu->arch.xen.poll_evtchn;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78c5bc654cff..6453fbaedb08 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -217,9 +217,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PHYSICAL_PAGE_MASK;
+ phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
+ /*
+ * Mask out any bits not part of the actual physical
+ * address, like memory encryption bits.
+ */
+ phys_addr &= PHYSICAL_PAGE_MASK;
+
retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
pcm, &new_pcm);
if (retval) {
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 00127abd89ee..99620428ad78 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -11,7 +11,6 @@
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
-#include <linux/init.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
@@ -389,18 +388,6 @@ out:
return ret;
}
-int __init bpf_arch_init_dispatcher_early(void *ip)
-{
- const u8 *nop_insn = x86_nops[5];
-
- if (is_endbr(*(u32 *)ip))
- ip += ENDBR_INSN_SIZE;
-
- if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
- text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
- return 0;
-}
-
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index bb176c72891c..93ae33248f42 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -513,15 +513,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
static void pm_save_spec_msr(void)
{
- u32 spec_msr_id[] = {
- MSR_IA32_SPEC_CTRL,
- MSR_IA32_TSX_CTRL,
- MSR_TSX_FORCE_ABORT,
- MSR_IA32_MCU_OPT_CTRL,
- MSR_AMD64_LS_CFG,
+ struct msr_enumeration {
+ u32 msr_no;
+ u32 feature;
+ } msr_enum[] = {
+ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
+ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
+ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
+ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
+ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
};
+ int i;
- msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+ if (boot_cpu_has(msr_enum[i].feature))
+ msr_build_context(&msr_enum[i].msr_no, 1);
+ }
}
static int pm_check_save_msr(void)
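
The pm_save_spec_msr() rewrite above pairs each speculation-related MSR with the feature bit that implies the MSR exists, and only saves the ones the boot CPU actually reports, instead of unconditionally poking the whole list. A minimal table-driven sketch of that pattern (userspace C; the MSR numbers and feature ids below are invented placeholders):

#include <stdio.h>
#include <stdbool.h>

struct msr_enumeration {
	unsigned int msr_no;
	unsigned int feature;
};

static bool cpu_has(unsigned int feature)
{
	return feature == 1;		/* pretend only feature 1 is present */
}

static void save_msr(unsigned int msr_no)
{
	printf("saving MSR %#x\n", msr_no);
}

int main(void)
{
	static const struct msr_enumeration msrs[] = {
		{ 0x48,  1 },		/* saved: its feature bit is present */
		{ 0x122, 2 },		/* skipped: feature not present */
	};

	for (unsigned int i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++)
		if (cpu_has(msrs[i].feature))
			save_msr(msrs[i].msr_no);
	return 0;
}
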
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index f82857e48815..038da45f057a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -23,6 +23,7 @@
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
+#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -113,7 +114,7 @@ static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);
static int __init parse_xen_msr_safe(char *str)
{
if (str)
- return strtobool(str, &xen_msr_safe);
+ return kstrtobool(str, &xen_msr_safe);
return -EINVAL;
}
early_param("xen_msr_safe", parse_xen_msr_safe);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 4f4309500559..8db26f10fb1d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
@@ -85,7 +86,7 @@ static void __init xen_parse_512gb(void)
arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
if (!arg)
val = true;
- else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
+ else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
return;
xen_512gb_limit = val;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 6a5c849ee061..ed761c62ad0a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1213,7 +1213,7 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
* parent so that offline always happens towards the root.
*/
if (parent)
- blkcg_pin_online(css);
+ blkcg_pin_online(&parent->css);
return 0;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 17667159482e..5487912befe8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_stats;
- blk_queue_dma_alignment(q, 511);
blk_set_default_limits(&q->limits);
q->nr_requests = BLKDEV_DEFAULT_RQ;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a789cda68a5..228a6696d835 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4045,9 +4045,14 @@ EXPORT_SYMBOL(__blk_mq_alloc_disk);
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
struct lock_class_key *lkclass)
{
+ struct gendisk *disk;
+
if (!blk_get_queue(q))
return NULL;
- return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+ disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+ if (!disk)
+ blk_put_queue(q);
+ return disk;
}
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
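
The blk-mq hunk above fixes a reference leak: blk_mq_alloc_disk_for_queue() takes a queue reference with blk_get_queue() up front, so if __alloc_disk_node() then fails it must drop that reference with blk_put_queue() before returning NULL. A stripped-down sketch of that take-then-undo pattern (userspace C, toy refcount, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct queue { int refcount; };

static int get_queue(struct queue *q)  { q->refcount++; return 1; }
static void put_queue(struct queue *q) { q->refcount--; }

static void *alloc_disk_for_queue(struct queue *q, int simulate_failure)
{
	void *disk;

	if (!get_queue(q))
		return NULL;
	disk = simulate_failure ? NULL : malloc(16);
	if (!disk)
		put_queue(q);		/* undo the reference on failure */
	return disk;
}

int main(void)
{
	struct queue q = { 0 };

	free(alloc_disk_for_queue(&q, 1));
	printf("refcount after failed alloc: %d\n", q.refcount);	/* 0 */
	return 0;
}
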
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8bb9eef5310e..8ac1038d0c79 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -57,8 +57,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->misaligned = 0;
lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0;
+ lim->dma_alignment = 511;
}
-EXPORT_SYMBOL(blk_set_default_limits);
/**
* blk_set_stacking_limits - set default limits for stacking devices
@@ -600,6 +600,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+ t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
/* Set non-power-of-2 compatible chunk_sectors boundary */
if (b->chunk_sectors)
@@ -773,7 +774,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
**/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
- q->dma_alignment = mask;
+ q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
@@ -795,8 +796,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
BUG_ON(mask > PAGE_SIZE);
- if (mask > q->dma_alignment)
- q->dma_alignment = mask;
+ if (mask > q->limits.dma_alignment)
+ q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
diff --git a/block/blk.h b/block/blk.h
index d6ea0d1a6db0..a186ea20f39d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -331,6 +331,7 @@ void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
+void blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);
/*
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index f52265293482..73db0cb44fc7 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -1778,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
- if (!speakup_console[vc->vc_num] || spk_parked)
+ if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
diff --git a/drivers/accessibility/speakup/utils.h b/drivers/accessibility/speakup/utils.h
index 4bf2ee8ac246..4ce9a12f7664 100644
--- a/drivers/accessibility/speakup/utils.h
+++ b/drivers/accessibility/speakup/utils.h
@@ -54,7 +54,7 @@ static inline int oops(const char *msg, const char *info)
static inline struct st_key *hash_name(char *name)
{
- u_char *pn = (u_char *)name;
+ unsigned char *pn = (unsigned char *)name;
int hash = 0;
while (*pn) {
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 23f49a2f4d14..6cceca64a6bc 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a,
{
struct memory_initiator *ia;
struct memory_initiator *ib;
- unsigned long *p_nodes = priv;
ia = list_entry(a, struct memory_initiator, node);
ib = list_entry(b, struct memory_initiator, node);
- set_bit(ia->processor_pxm, p_nodes);
- set_bit(ib->processor_pxm, p_nodes);
-
return ia->processor_pxm - ib->processor_pxm;
}
+static int initiators_to_nodemask(unsigned long *p_nodes)
+{
+ struct memory_initiator *initiator;
+
+ if (list_empty(&initiators))
+ return -ENXIO;
+
+ list_for_each_entry(initiator, &initiators, node)
+ set_bit(initiator->processor_pxm, p_nodes);
+
+ return 0;
+}
+
static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
@@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
* initiators.
*/
bitmap_zero(p_nodes, MAX_NUMNODES);
- list_sort(p_nodes, &initiators, initiator_cmp);
+ list_sort(NULL, &initiators, initiator_cmp);
+ if (initiators_to_nodemask(p_nodes) < 0)
+ return;
+
if (!access0done) {
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i];
@@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
/* Access 1 ignores Generic Initiators */
bitmap_zero(p_nodes, MAX_NUMNODES);
- list_sort(p_nodes, &initiators, initiator_cmp);
- best = 0;
+ if (initiators_to_nodemask(p_nodes) < 0)
+ return;
+
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i];
if (!loc)
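The hmat change makes the list_sort() comparator side-effect free and moves the nodemask collection into its own pass. Below is a compact userspace analogue using qsort(), where the comparator only compares and a separate loop builds the bitmask; the types and the single-word mask encoding are illustrative, not the ACPI code.

#include <stdio.h>
#include <stdlib.h>

struct initiator {
        int pxm;        /* proximity domain id, assumed < 64 here */
};

/* The comparator only compares -- no side effects, which is all a
 * generic sort helper expects from it. */
static int initiator_cmp(const void *a, const void *b)
{
        const struct initiator *ia = a, *ib = b;

        return ia->pxm - ib->pxm;
}

/* Building the node mask is a separate, explicit pass. */
static unsigned long initiators_to_nodemask(const struct initiator *v, int n)
{
        unsigned long mask = 0;

        for (int i = 0; i < n; i++)
                mask |= 1UL << v[i].pxm;
        return mask;
}

int main(void)
{
        struct initiator v[] = { { 3 }, { 0 }, { 2 } };
        int n = sizeof(v) / sizeof(v[0]);

        qsort(v, n, sizeof(v[0]), initiator_cmp);
        printf("sorted first pxm %d, nodemask 0x%lx\n",
               v[0].pxm, initiators_to_nodemask(v, n));
        return 0;
}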
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1c39cfce32fa..4ad42b0f75cd 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -739,6 +739,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
const char *failure_string;
struct binder_buffer *buffer;
+ if (unlikely(vma->vm_mm != alloc->mm)) {
+ ret = -EINVAL;
+ failure_string = "invalid vma->vm_mm";
+ goto err_invalid_mm;
+ }
+
mutex_lock(&binder_alloc_mmap_lock);
if (alloc->buffer_size) {
ret = -EBUSY;
@@ -785,6 +791,7 @@ err_alloc_pages_failed:
alloc->buffer_size = 0;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
+err_invalid_mm:
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%s: %d %lx-%lx %s failed %d\n", __func__,
alloc->pid, vma->vm_start, vma->vm_end,
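The binder hunk validates vma->vm_mm before taking any locks and adds a matching err_invalid_mm label so the unwind path only undoes work that actually happened. The sketch below shows the same early-check-plus-goto-unwind shape in plain C; the ctx structure and its two setup steps are invented purely for illustration.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        void *buf;
        FILE *log;
};

/* Early validation plus goto unwinding: each label undoes only what
 * was actually set up before the failure point, and the new up-front
 * check jumps past all of the cleanup because nothing exists yet. */
static int ctx_setup(struct ctx *c, int owner_ok)
{
        int ret;

        if (!owner_ok) {
                ret = -EINVAL;
                goto err_invalid_owner;
        }

        c->buf = malloc(64);
        if (!c->buf) {
                ret = -ENOMEM;
                goto err_buf;
        }

        c->log = fopen("/dev/null", "w");
        if (!c->log) {
                ret = -EIO;
                goto err_log;
        }
        return 0;

err_log:
        free(c->buf);
        c->buf = NULL;
err_buf:
err_invalid_owner:
        fprintf(stderr, "ctx_setup failed: %d\n", ret);
        return ret;
}

int main(void)
{
        struct ctx c = { 0 };

        if (ctx_setup(&c, 1))
                return 1;
        fclose(c.log);
        free(c.buf);
        return 0;
}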
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f3e4db16fd07..8532b839a343 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2672,7 +2672,7 @@ static int init_submitter(struct drbd_device *device)
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
- struct drbd_connection *connection;
+ struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
@@ -2789,7 +2789,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
return NO_ERROR;
out_idr_remove_from_resource:
- for_each_connection(connection, resource) {
+ for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index f96cb01e9604..e9de9d846b73 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -57,10 +57,8 @@
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
struct ublk_rq_data {
- union {
- struct callback_head work;
- struct llist_node node;
- };
+ struct llist_node node;
+ struct callback_head work;
};
struct ublk_uring_cmd_pdu {
@@ -766,15 +764,31 @@ static inline void __ublk_rq_task_work(struct request *req)
ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
}
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+{
+ struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+ struct ublk_rq_data *data, *tmp;
+
+ io_cmds = llist_reverse_order(io_cmds);
+ llist_for_each_entry_safe(data, tmp, io_cmds, node)
+ __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+}
+
+static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+{
+ struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+ struct ublk_rq_data *data, *tmp;
+
+ llist_for_each_entry_safe(data, tmp, io_cmds, node)
+ __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+}
+
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data;
- llist_for_each_entry(data, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+ ublk_forward_io_cmds(ubq);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -782,14 +796,20 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data *data = container_of(work,
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
- __ublk_rq_task_work(req);
+ ublk_forward_io_cmds(ubq);
}
-static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ struct ublk_io *io;
+ if (!llist_add(&data->node, &ubq->io_cmds))
+ return;
+
+ io = &ubq->ios[rq->tag];
/*
* If the check passes, we know that this is a re-issued request aborted
* previously in monitor_work because the ubq_daemon (cmd's task) is
@@ -803,11 +823,11 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
* guarantees that here is a re-issued request aborted previously.
*/
if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data;
-
- llist_for_each_entry(data, io_cmds, node)
- __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+ ublk_abort_io_cmds(ubq);
+ } else if (ublk_can_use_task_work(ubq)) {
+ if (task_work_add(ubq->ubq_daemon, &data->work,
+ TWA_SIGNAL_NO_IPI))
+ ublk_abort_io_cmds(ubq);
} else {
struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -817,23 +837,6 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
}
}
-static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq,
- bool last)
-{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
- if (ublk_can_use_task_work(ubq)) {
- enum task_work_notify_mode notify_mode = last ?
- TWA_SIGNAL_NO_IPI : TWA_NONE;
-
- if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
- __ublk_abort_rq(ubq, rq);
- } else {
- if (llist_add(&data->node, &ubq->io_cmds))
- ublk_submit_cmd(ubq, rq);
- }
-}
-
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -865,19 +868,11 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
- ublk_queue_cmd(ubq, rq, bd->last);
+ ublk_queue_cmd(ubq, rq);
return BLK_STS_OK;
}
-static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
- struct ublk_queue *ubq = hctx->driver_data;
-
- if (ublk_can_use_task_work(ubq))
- __set_notify_signal(ubq->ubq_daemon);
-}
-
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -899,7 +894,6 @@ static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
- .commit_rqs = ublk_commit_rqs,
.init_hctx = ublk_init_hctx,
.init_request = ublk_init_rq,
};
@@ -1197,7 +1191,7 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
- ublk_queue_cmd(ubq, req, true);
+ ublk_queue_cmd(ubq, req);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
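After this change every ublk request goes through the per-queue io_cmds llist: producers push lock-free, and the consumer grabs the whole list with llist_del_all() and reverses it to recover submission order. A minimal userspace sketch of that push / take-all / reverse pattern using C11 atomics follows; the node type and tags are made up.

#include <stdatomic.h>
#include <stdio.h>

struct node {
        int tag;
        struct node *next;
};

static _Atomic(struct node *) head;

/* Returns 1 if the list was empty before the add, i.e. the caller is
 * the one who should kick the consumer, mirroring llist_add(). */
static int push(struct node *n)
{
        struct node *old = atomic_load(&head);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&head, &old, n));
        return old == NULL;
}

static struct node *take_all(void)
{
        return atomic_exchange(&head, NULL);
}

/* The lock-free push builds a LIFO chain; reverse it so the consumer
 * sees requests in the order they were queued. */
static struct node *reverse(struct node *n)
{
        struct node *prev = NULL;

        while (n) {
                struct node *next = n->next;

                n->next = prev;
                prev = n;
                n = next;
        }
        return prev;
}

int main(void)
{
        struct node a = { .tag = 1 }, b = { .tag = 2 }, c = { .tag = 3 };

        push(&a);
        push(&b);
        push(&c);
        for (struct node *n = reverse(take_all()); n; n = n->next)
                printf("tag %d\n", n->tag);
        return 0;
}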
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
index a4388440aca7..91db001eb69a 100644
--- a/drivers/bus/intel-ixp4xx-eb.c
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -49,7 +49,7 @@
#define IXP4XX_EXP_SIZE_SHIFT 10
#define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */
#define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */
-#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */
#define IXP4XX_EXP_BYTE_RD16 BIT(6)
#define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */
#define IXP4XX_EXP_MUX_EN BIT(4)
@@ -57,8 +57,6 @@
#define IXP4XX_EXP_WORD BIT(2) /* Always zero */
#define IXP4XX_EXP_WR_EN BIT(1)
#define IXP4XX_EXP_BYTE_EN BIT(0)
-#define IXP42X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
-#define IXP43X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
#define IXP4XX_EXP_CNFG0 0x20
#define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31)
@@ -252,10 +250,9 @@ static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
}
- if (eb->is_42x)
- cs_cfg &= ~IXP42X_RESERVED;
if (eb->is_43x) {
- cs_cfg &= ~IXP43X_RESERVED;
+ /* Should always be zero */
+ cs_cfg &= ~IXP4XX_EXP_WORD;
/*
* This bit for Intel strata flash is currently unused, but let's
* report it if we find one.
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 4cd2e127946e..3aa91aed3bf7 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -267,6 +267,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
+ u32 int_mask, status;
+ bool timeout;
+
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
@@ -274,13 +277,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
reinit_completion(&rsb->complete);
- writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
- rsb->regs + RSB_INTE);
+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+ writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
- if (!wait_for_completion_io_timeout(&rsb->complete,
- msecs_to_jiffies(100))) {
+ if (irqs_disabled()) {
+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+ status, (status & int_mask),
+ 10, 100000);
+ writel(status, rsb->regs + RSB_INTS);
+ } else {
+ timeout = !wait_for_completion_io_timeout(&rsb->complete,
+ msecs_to_jiffies(100));
+ status = rsb->status;
+ }
+
+ if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
@@ -292,18 +305,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
return -ETIMEDOUT;
}
- if (rsb->status & RSB_INTS_LOAD_BSY) {
+ if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
- if (rsb->status & RSB_INTS_TRANS_ERR) {
- if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
+ if (status & RSB_INTS_TRANS_ERR) {
+ if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
- if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
+ if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}
@@ -812,14 +825,6 @@ static int sunxi_rsb_remove(struct platform_device *pdev)
return 0;
}
-static void sunxi_rsb_shutdown(struct platform_device *pdev)
-{
- struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- sunxi_rsb_hw_exit(rsb);
-}
-
static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
sunxi_rsb_runtime_resume, NULL)
@@ -835,7 +840,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
.remove = sunxi_rsb_remove,
- .shutdown = sunxi_rsb_shutdown,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
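The sunxi-rsb transfer path above now falls back to atomic register polling when interrupts are disabled instead of sleeping on the completion. Below is a rough userspace analogue of the poll-with-deadline loop; read_status() is a fake register that completes after a few reads, purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Fake "status register": reports completion after a few reads. */
static uint32_t read_status(void)
{
        static int reads;

        return ++reads > 5 ? 0x1 : 0x0;   /* bit 0 = transfer over */
}

static uint64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Busy-poll a status word until any bit in mask is set or the timeout
 * expires -- the userspace analogue of polling a register when
 * sleeping for an interrupt is not allowed. */
static int poll_status(uint32_t mask, uint32_t *status, uint64_t timeout_us)
{
        uint64_t deadline = now_us() + timeout_us;

        for (;;) {
                *status = read_status();
                if (*status & mask)
                        return 0;
                if (now_us() > deadline)
                        return -1;      /* timed out */
        }
}

int main(void)
{
        uint32_t status;

        if (poll_status(0x1, &status, 100000))
                printf("timeout\n");
        else
                printf("done, status 0x%x\n", status);
        return 0;
}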
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1621ce818705..d69905233aff 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- if (!tpm_chip_start(chip)) {
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
else
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
suspended:
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index b174f727a8ef..16870943a13e 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -40,7 +40,7 @@ static const struct clk_pll_characteristics rm9200_pll_characteristics = {
};
static const struct sck at91rm9200_systemck[] = {
- { .n = "udpck", .p = "usbck", .id = 2 },
+ { .n = "udpck", .p = "usbck", .id = 1 },
{ .n = "uhpck", .p = "usbck", .id = 4 },
{ .n = "pck0", .p = "prog0", .id = 8 },
{ .n = "pck1", .p = "prog1", .id = 9 },
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index a18ed88f3b82..b3198784e1c3 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -5364,6 +5364,8 @@ static struct clk_branch gcc_ufs_1_card_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_ufs_1_card_clkref_clk",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -5432,6 +5434,8 @@ static struct clk_branch gcc_ufs_card_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_ufs_card_clkref_clk",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -5848,6 +5852,8 @@ static struct clk_branch gcc_ufs_ref_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_ufs_ref_clkref_clk",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 7cf5e130e92f..0f21a8a767ac 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
@@ -56,22 +55,6 @@ enum gdsc_status {
GDSC_ON
};
-static int gdsc_pm_runtime_get(struct gdsc *sc)
-{
- if (!sc->dev)
- return 0;
-
- return pm_runtime_resume_and_get(sc->dev);
-}
-
-static int gdsc_pm_runtime_put(struct gdsc *sc)
-{
- if (!sc->dev)
- return 0;
-
- return pm_runtime_put_sync(sc->dev);
-}
-
/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
@@ -271,8 +254,9 @@ static void gdsc_retain_ff_on(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
-static int _gdsc_enable(struct gdsc *sc)
+static int gdsc_enable(struct generic_pm_domain *domain)
{
+ struct gdsc *sc = domain_to_gdsc(domain);
int ret;
if (sc->pwrsts == PWRSTS_ON)
@@ -328,22 +312,11 @@ static int _gdsc_enable(struct gdsc *sc)
return 0;
}
-static int gdsc_enable(struct generic_pm_domain *domain)
+static int gdsc_disable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
int ret;
- ret = gdsc_pm_runtime_get(sc);
- if (ret)
- return ret;
-
- return _gdsc_enable(sc);
-}
-
-static int _gdsc_disable(struct gdsc *sc)
-{
- int ret;
-
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
@@ -388,18 +361,6 @@ static int _gdsc_disable(struct gdsc *sc)
return 0;
}
-static int gdsc_disable(struct generic_pm_domain *domain)
-{
- struct gdsc *sc = domain_to_gdsc(domain);
- int ret;
-
- ret = _gdsc_disable(sc);
-
- gdsc_pm_runtime_put(sc);
-
- return ret;
-}
-
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
@@ -447,11 +408,6 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
- /* ...and the power-domain */
- ret = gdsc_pm_runtime_get(sc);
- if (ret)
- goto err_disable_supply;
-
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
@@ -459,14 +415,14 @@ static int gdsc_init(struct gdsc *sc)
if (sc->flags & VOTABLE) {
ret = gdsc_update_collapse_bit(sc, false);
if (ret)
- goto err_put_rpm;
+ goto err_disable_supply;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
- goto err_put_rpm;
+ goto err_disable_supply;
}
/*
@@ -496,13 +452,10 @@ static int gdsc_init(struct gdsc *sc)
ret = pm_genpd_init(&sc->pd, NULL, !on);
if (ret)
- goto err_put_rpm;
+ goto err_disable_supply;
return 0;
-err_put_rpm:
- if (on)
- gdsc_pm_runtime_put(sc);
err_disable_supply:
if (on && sc->rsupply)
regulator_disable(sc->rsupply);
@@ -541,8 +494,6 @@ int gdsc_register(struct gdsc_desc *desc,
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
- if (pm_runtime_enabled(dev))
- scs[i]->dev = dev;
scs[i]->regmap = regmap;
scs[i]->rcdev = rcdev;
ret = gdsc_init(scs[i]);
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 981a12c8502d..803512688336 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -30,7 +30,6 @@ struct reset_controller_dev;
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
- * @dev: the device holding the GDSC, used for pm_runtime calls
*/
struct gdsc {
struct generic_pm_domain pd;
@@ -74,7 +73,6 @@ struct gdsc {
const char *supply;
struct regulator *rsupply;
- struct device *dev;
};
struct gdsc_desc {
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 273f77d54dab..e6d6cbf8c4e6 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -81,17 +81,19 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
if (!dev->parent) {
dev_err(dev, "not instantiated from MFD\n");
return -EINVAL;
}
- variant = of_device_get_match_data(dev->parent);
- if (!variant) {
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
dev_err(dev, "cannot match parent device\n");
return -EINVAL;
}
+ variant = match->data;
*mux_mask = variant->mux_mask;
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index 62ce6814f141..0d2a950ed184 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -231,7 +231,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
- DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
@@ -239,7 +239,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
- DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "fout_shared1_pll",
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
/* CORE */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a7ff77550e17..933bb960490d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -806,6 +806,9 @@ static u64 __arch_timer_check_delta(void)
/*
* XGene-1 implements CVAL in terms of TVAL, meaning
* that the maximum timer range is 32bit. Shame on them.
+ *
+ * Note that TVAL is signed, thus has only 31 of its
+ * 32 bits to express magnitude.
*/
MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
APM_CPU_PART_POTENZA)),
@@ -813,8 +816,8 @@ static u64 __arch_timer_check_delta(void)
};
if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
- pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
- return CLOCKSOURCE_MASK(32);
+ pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
+ return CLOCKSOURCE_MASK(31);
}
#endif
return CLOCKSOURCE_MASK(arch_counter_get_width());
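The comment added above explains the narrowing from CLOCKSOURCE_MASK(32) to CLOCKSOURCE_MASK(31): TVAL is a signed 32-bit down-counter, so only 31 bits carry magnitude. A tiny illustration of the difference:

#include <stdint.h>
#include <stdio.h>

/* Largest delta representable by an unsigned vs. a signed N-bit
 * down-counter: the signed counter loses one bit to the sign. */
static uint64_t unsigned_max(unsigned int bits) { return (1ULL << bits) - 1; }
static uint64_t signed_max(unsigned int bits)   { return (1ULL << (bits - 1)) - 1; }

int main(void)
{
        printf("32-bit unsigned max delta: %llu\n",
               (unsigned long long)unsigned_max(32));
        printf("32-bit signed   max delta: %llu\n",
               (unsigned long long)signed_max(32));
        return 0;
}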
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 969a552da8d2..a0d66fabf073 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -51,7 +51,7 @@ static int riscv_clock_next_event(unsigned long delta,
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 310779b07daf..00476e94db90 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -35,7 +35,7 @@ config X86_PCC_CPUFREQ
If in doubt, say N.
config X86_AMD_PSTATE
- tristate "AMD Processor P-State driver"
+ bool "AMD Processor P-State driver"
depends on X86 && ACPI
select ACPI_PROCESSOR
select ACPI_CPPC_LIB if X86_64
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index ace7d50cf2ac..204e39006dda 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -59,12 +59,8 @@
* we disable it by default to go acpi-cpufreq on these processors and add a
* module parameter to be able to enable it manually for debugging.
*/
-static bool shared_mem = false;
-module_param(shared_mem, bool, 0444);
-MODULE_PARM_DESC(shared_mem,
- "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
-
static struct cpufreq_driver amd_pstate_driver;
+static int cppc_load __initdata;
static inline int pstate_enable(bool enable)
{
@@ -424,12 +420,22 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
amd_pstate_driver.boost_enabled = true;
}
+static void amd_perf_ctl_reset(unsigned int cpu)
+{
+ wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
@@ -616,6 +622,15 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
+ /*
+ * By default the pstate driver is not loaded; enable the
+ * amd_pstate passive mode driver explicitly with
+ * amd_pstate=passive on the kernel command line.
+ */
+ if (!cppc_load) {
+ pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ return -ENODEV;
+ }
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
@@ -630,13 +645,11 @@ static int __init amd_pstate_init(void)
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
- } else if (shared_mem) {
+ } else {
+ pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
static_call_update(amd_pstate_init_perf, cppc_init_perf);
static_call_update(amd_pstate_update_perf, cppc_update_perf);
- } else {
- pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
- return -ENODEV;
}
/* enable amd pstate feature */
@@ -653,16 +666,22 @@ static int __init amd_pstate_init(void)
return ret;
}
+device_initcall(amd_pstate_init);
-static void __exit amd_pstate_exit(void)
+static int __init amd_pstate_param(char *str)
{
- cpufreq_unregister_driver(&amd_pstate_driver);
+ if (!str)
+ return -EINVAL;
- amd_pstate_enable(false);
-}
+ if (!strcmp(str, "disable")) {
+ cppc_load = 0;
+ pr_info("driver is explicitly disabled\n");
+ } else if (!strcmp(str, "passive"))
+ cppc_load = 1;
-module_init(amd_pstate_init);
-module_exit(amd_pstate_exit);
+ return 0;
+}
+early_param("amd_pstate", amd_pstate_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index 97086fab698e..903325aac991 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -8,6 +8,13 @@
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
+static struct resource hmem_active = {
+ .name = "HMEM devices",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+};
+
void hmem_register_device(int target_nid, struct resource *r)
{
/* define a clean / non-busy resource for the platform device */
@@ -41,6 +48,12 @@ void hmem_register_device(int target_nid, struct resource *r)
goto out_pdev;
}
+ if (!__request_region(&hmem_active, res.start, resource_size(&res),
+ dev_name(&pdev->dev), 0)) {
+ dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
+ goto out_active;
+ }
+
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
info = (struct memregion_info) {
.target_node = target_nid,
@@ -66,6 +79,8 @@ void hmem_register_device(int target_nid, struct resource *r)
return;
out_resource:
+ __release_region(&hmem_active, res.start, resource_size(&res));
+out_active:
platform_device_put(pdev);
out_pdev:
memregion_free(id);
@@ -73,15 +88,6 @@ out_pdev:
static __init int hmem_register_one(struct resource *res, void *data)
{
- /*
- * If the resource is not a top-level resource it was already
- * assigned to a device by the HMAT parsing.
- */
- if (res->parent != &iomem_resource) {
- pr_info("HMEM: skip %pr, already claimed\n", res);
- return 0;
- }
-
hmem_register_device(phys_to_target_node(res->start), res);
return 0;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index dd0f83ee505b..e6f36c014c4c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
+#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@@ -391,8 +392,10 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
const void __user *user_data)
{
struct dma_buf_import_sync_file arg;
- struct dma_fence *fence;
+ struct dma_fence *fence, *f;
enum dma_resv_usage usage;
+ struct dma_fence_unwrap iter;
+ unsigned int num_fences;
int ret = 0;
if (copy_from_user(&arg, user_data, sizeof(arg)))
@@ -411,13 +414,21 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
DMA_RESV_USAGE_READ;
- dma_resv_lock(dmabuf->resv, NULL);
+ num_fences = 0;
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ ++num_fences;
- ret = dma_resv_reserve_fences(dmabuf->resv, 1);
- if (!ret)
- dma_resv_add_fence(dmabuf->resv, fence, usage);
+ if (num_fences > 0) {
+ dma_resv_lock(dmabuf->resv, NULL);
- dma_resv_unlock(dmabuf->resv);
+ ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
+ if (!ret) {
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ dma_resv_add_fence(dmabuf->resv, f, usage);
+ }
+
+ dma_resv_unlock(dmabuf->resv);
+ }
dma_fence_put(fence);
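The dma-buf import path above now walks the unwrapped fence container twice: a first pass counts the fences, then exactly that many slots are reserved before any fence is added, and the reservation is skipped entirely when the count is zero. A self-contained sketch of that count / reserve / fill ordering, where the slot array is only a stand-in for the reservation object:

#include <stdio.h>
#include <stdlib.h>

struct slots {
        int *v;
        int reserved;
        int used;
};

static int reserve(struct slots *s, int n)
{
        int *p = realloc(s->v, (s->used + n) * sizeof(*p));

        if (!p)
                return -1;
        s->v = p;
        s->reserved = s->used + n;
        return 0;
}

static void add(struct slots *s, int fence)
{
        if (s->used < s->reserved)
                s->v[s->used++] = fence;
}

int main(void)
{
        int fences[] = { 10, 11, 12, -1 };      /* -1 terminates */
        struct slots s = { 0 };
        int num = 0;

        /* First pass: count the entries the container unwraps to. */
        for (int i = 0; fences[i] >= 0; i++)
                num++;

        /* Only touch the reservation when there is something to add,
         * then reserve exactly that many slots and fill them. */
        if (num > 0 && reserve(&s, num) == 0)
                for (int i = 0; i < num; i++)
                        add(&s, fences[i]);

        printf("stored %d of %d fences\n", s.used, num);
        free(s.v);
        return 0;
}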
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 8f5848aa144f..59d158873f4c 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -233,18 +233,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
- /* check the name is unique */
- mutex_lock(&heap_list_lock);
- list_for_each_entry(h, &heap_list, list) {
- if (!strcmp(h->name, exp_info->name)) {
- mutex_unlock(&heap_list_lock);
- pr_err("dma_heap: Already registered heap named %s\n",
- exp_info->name);
- return ERR_PTR(-EINVAL);
- }
- }
- mutex_unlock(&heap_list_lock);
-
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
@@ -283,13 +271,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
err_ret = ERR_CAST(dev_ret);
goto err2;
}
- /* Add heap to the list */
+
mutex_lock(&heap_list_lock);
+ /* check the name is unique */
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ err_ret = ERR_PTR(-EINVAL);
+ goto err3;
+ }
+ }
+
+ /* Add heap to the list */
list_add(&heap->list, &heap_list);
mutex_unlock(&heap_list_lock);
return heap;
+err3:
+ device_destroy(dma_heap_class, heap->heap_devt);
err2:
cdev_del(&heap->heap_cdev);
err1:
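dma_heap_add() now performs the duplicate-name check and the list insertion under the same heap_list_lock acquisition, closing the window in which two heaps with the same name could both pass the check. A userspace sketch of check-and-insert under one lock (compile with -pthread; the fixed-size table is only for brevity):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_HEAPS 8

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *heap_names[MAX_HEAPS];
static int heap_count;

/* Check for a duplicate and insert under the same lock, so a second
 * registration with the same name cannot slip in between the steps. */
static int register_heap(const char *name)
{
        int ret = 0;

        pthread_mutex_lock(&heap_lock);
        for (int i = 0; i < heap_count; i++) {
                if (!strcmp(heap_names[i], name)) {
                        ret = -1;       /* already registered */
                        goto out;
                }
        }
        if (heap_count < MAX_HEAPS)
                heap_names[heap_count++] = name;
        else
                ret = -1;
out:
        pthread_mutex_unlock(&heap_lock);
        return ret;
}

int main(void)
{
        printf("%d %d\n", register_heap("system"), register_heap("system"));
        return 0;
}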
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index 41041ff0fadb..2a120d8d3c27 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -327,7 +327,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
tusb320_extcon_irq_handler(priv, reg);
- tusb320_typec_irq_handler(priv, reg);
+
+ /*
+ * Type-C support is optional. Only call the Type-C handler if a
+ * port had been registered previously.
+ */
+ if (priv->port)
+ tusb320_typec_irq_handler(priv, reg);
regmap_write(priv->regmap, TUSB320_REG9, reg);
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index c52bcaa9def6..9ca21feb9d45 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -149,12 +149,8 @@ static int coreboot_table_probe(struct platform_device *pdev)
if (!ptr)
return -ENOMEM;
- ret = bus_register(&coreboot_bus_type);
- if (!ret) {
- ret = coreboot_table_populate(dev, ptr);
- if (ret)
- bus_unregister(&coreboot_bus_type);
- }
+ ret = coreboot_table_populate(dev, ptr);
+
memunmap(ptr);
return ret;
@@ -169,7 +165,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy)
static int coreboot_table_remove(struct platform_device *pdev)
{
bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
- bus_unregister(&coreboot_bus_type);
return 0;
}
@@ -199,6 +194,32 @@ static struct platform_driver coreboot_table_driver = {
.of_match_table = of_match_ptr(coreboot_of_match),
},
};
-module_platform_driver(coreboot_table_driver);
+
+static int __init coreboot_table_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&coreboot_table_driver);
+ if (ret) {
+ bus_unregister(&coreboot_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+ platform_driver_unregister(&coreboot_table_driver);
+ bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 6c416955da53..bbe0a7cabb75 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -246,7 +246,9 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
- depends on MFD_INTEL_M10_BMC && FW_UPLOAD
+ depends on MFD_INTEL_M10_BMC
+ select FW_LOADER
+ select FW_UPLOAD
help
Secure update support for the Intel MAX10 board management
controller.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8639a4f9c6e8..2eca58220550 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1293,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index c8935d718207..4485bb29bec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 84f44f7e4111..1f76e27f1a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && adev->kfd.vram_used + vram_needed >
- adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size) -
- reserved_for_pt)) {
+ adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
@@ -988,6 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
+ struct hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1017,7 +1016,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
@@ -1035,7 +1034,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
if (ret)
amdgpu_mn_unregister(bo);
@@ -2372,6 +2371,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list.head) {
+ struct hmm_range *range;
+
invalid = atomic_read(&mem->invalid);
if (!invalid)
/* BO hasn't been invalidated since the last
@@ -2382,7 +2383,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+ &range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@@ -2401,7 +2403,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* FIXME: Cannot ignore the return code, must hold
* notifier_lock
*/
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
}
/* Mark the BO as valid unless it was invalidated
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 2168163aad2d..252a876b0725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
list_add_tail(&e->tv.head, &bucket[priority]);
e->user_pages = NULL;
+ e->range = NULL;
}
/* Connect the sorted buckets in the output list. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 9caea1688fc3..e4d78491bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -26,6 +26,8 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
+struct hmm_range;
+
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
@@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
+ struct hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 491d4846fc02..cfb262911bfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -328,7 +328,6 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
- drm_connector_update_edid_property(connector, NULL);
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d371000a5727..365e3fb6a9e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
return r;
++(num_ibs[r]);
+ p->gang_leader_idx = r;
return 0;
}
@@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size)
- return -EINVAL;
+ if (!p->gang_size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
@@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (ret)
goto free_all_kdata;
}
- p->gang_leader = p->jobs[p->gang_size - 1];
+ p->gang_leader = p->jobs[p->gang_leader_idx];
if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
ret = -ECANCELED;
@@ -910,7 +913,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
}
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
@@ -988,9 +991,10 @@ out_free_user_pages:
if (!e->user_pages)
continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
kvfree(e->user_pages);
e->user_pages = NULL;
+ e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
@@ -1195,16 +1199,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- for (i = 0; i < p->gang_size - 1; ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
+ if (p->jobs[i] == leader)
+ continue;
+
r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
if (r)
return r;
}
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r && r != -ERESTARTSYS)
DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-
return r;
}
@@ -1238,9 +1244,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i)
drm_sched_job_arm(&p->jobs[i]->base);
- for (i = 0; i < (p->gang_size - 1); ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
struct dma_fence *fence;
+ if (p->jobs[i] == leader)
+ continue;
+
fence = &p->jobs[i]->base.s_fence->scheduled;
r = amdgpu_sync_fence(&leader->sync, fence);
if (r)
@@ -1265,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ e->range = NULL;
}
if (r) {
r = -EAGAIN;
@@ -1276,7 +1286,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
list_for_each_entry(e, &p->validated, tv.head) {
/* Everybody except for the gang leader uses READ */
- for (i = 0; i < (p->gang_size - 1); ++i) {
+ for (i = 0; i < p->gang_size; ++i) {
+ if (p->jobs[i] == leader)
+ continue;
+
dma_resv_add_fence(e->tv.bo->base.resv,
&p->jobs[i]->base.s_fence->finished,
DMA_RESV_USAGE_READ);
@@ -1286,7 +1299,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
e->tv.num_shared = 0;
}
- seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
p->fence);
amdgpu_cs_post_dependencies(p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index cbaa19b2b8a3..f80adf9069ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -54,6 +54,7 @@ struct amdgpu_cs_parser {
/* scheduler job objects */
unsigned int gang_size;
+ unsigned int gang_leader_idx;
struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
struct amdgpu_job *gang_leader;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 64510898eedd..f1e9663b4051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6044,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
dma_fence_put(old);
return NULL;
}
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ /* chips with display hardware */
+ return true;
+ default:
+ /* IP discovery */
+ if (!adev->ip_versions[DCE_HWIP][0] ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+ return false;
+ return true;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ef31d687ef3..91571b1324f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
+ struct hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -413,14 +414,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
- r = amdgpu_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
+ r = amdgpu_mn_register(bo, args->addr);
+ if (r)
+ goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+ &range);
if (r)
goto release_object;
@@ -443,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
release_object:
drm_gem_object_put(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 34233a74248c..28612e56d0d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -479,6 +479,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
unsigned i;
unsigned vmhub, inv_eng;
+ if (adev->enable_mes) {
+ /* reserve engine 5 for firmware */
+ for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++)
+ vm_inv_engs[vmhub] &= ~(1 << 5);
+ }
+
for (i = 0; i < adev->num_rings; ++i) {
ring = adev->rings[i];
vmhub = ring->funcs->vmhub;
@@ -656,7 +662,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev) ||
- !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
+ !amdgpu_device_has_display_hardware(adev)) {
size = 0;
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index cd968e781077..adac650cf544 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -169,7 +169,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- dma_fence_put(&job->hw_fence);
+ /* only put the hw fence if it has an embedded fence */
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -254,6 +258,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
DRM_ERROR("Error adding fence (%d)\n", r);
}
+ if (!fence && job->gang_submit)
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
while (fence == NULL && vm && !job->vmid) {
r = amdgpu_vmid_grab(vm, ring, &job->sync,
&job->base.s_fence->finished,
@@ -264,9 +271,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync);
}
- if (!fence && job->gang_submit)
- fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
-
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index effa7df3ddbf..7978307e1d6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -172,6 +172,7 @@ void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
+ mem_ctx->shared_bo = NULL;
}
static void psp_free_shared_bufs(struct psp_context *psp)
@@ -182,6 +183,7 @@ static void psp_free_shared_bufs(struct psp_context *psp)
/* free TMR memory buffer */
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ psp->tmr_bo = NULL;
/* free xgmi shared memory */
psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
@@ -743,7 +745,7 @@ static int psp_load_toc(struct psp_context *psp,
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
- int ret;
+ int ret = 0;
int tmr_size;
void *tmr_buf;
void **pptr;
@@ -770,10 +772,12 @@ static int psp_tmr_init(struct psp_context *psp)
}
}
- pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ if (!psp->tmr_bo) {
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ }
return ret;
}
@@ -2732,8 +2736,6 @@ static int psp_suspend(void *handle)
}
out:
- psp_free_shared_bufs(psp);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 57277b1cf183..b64938ed8cb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -643,9 +643,6 @@ struct amdgpu_ttm_tt {
struct task_struct *usertask;
uint32_t userflags;
bool bound;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- struct hmm_range *range;
-#endif
};
#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -658,7 +655,8 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+ struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
bool readonly;
int r = 0;
+ /* Make sure get_user_pages_done() can cleanup gracefully */
+ *range = NULL;
+
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
return -EFAULT;
}
- /* Another get_user_pages is running at the same time?? */
- if (WARN_ON(gtt->range))
- return -EFAULT;
-
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
@@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
- ttm->num_pages, &gtt->range, readonly,
+ ttm->num_pages, range, readonly,
true, NULL);
out_unlock:
mmap_read_unlock(mm);
@@ -713,30 +710,24 @@ out_unlock:
*
* Returns: true if pages are still valid
*/
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
{
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
- bool r = false;
- if (!gtt || !gtt->userptr)
+ if (!gtt || !gtt->userptr || !range)
return false;
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
- "No user pages to check\n");
+ WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
- if (gtt->range) {
- /*
- * FIXME: Must always hold notifier_lock for this, and must
- * not ignore the return code.
- */
- r = amdgpu_hmm_range_get_pages_done(gtt->range);
- gtt->range = NULL;
- }
-
- return !r;
+ /*
+ * FIXME: Must always hold notifier_lock for this, and must
+ * not ignore the return code.
+ */
+ return !amdgpu_hmm_range_get_pages_done(range);
}
#endif
@@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
/* unmap the pages mapped to the device */
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
-
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- if (gtt->range) {
- unsigned long i;
-
- for (i = 0; i < ttm->num_pages; i++) {
- if (ttm->pages[i] !=
- hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
- break;
- }
-
- WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
- }
-#endif
}
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6a70818039dd..a37207011a69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,6 +39,8 @@
#define AMDGPU_POISON 0xd0bed0be
+struct hmm_range;
+
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
@@ -149,15 +151,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+ struct hmm_range **range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages)
+ struct page **pages,
+ struct hmm_range **range)
{
return -EPERM;
}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
{
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 0b52af415b28..ce64ca1c6e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -156,6 +156,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 2):
fw_name = FIRMWARE_VANGOGH;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 16):
fw_name = FIRMWARE_DIMGREY_CAVEFISH;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
index f772bb499f3e..0312c71c3af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -32,7 +32,6 @@
#define RB_ENABLED (1 << 0)
#define RB4_ENABLED (1 << 1)
-#define MMSCH_DOORBELL_OFFSET 0x8
#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 21d822b1d589..88f9b327183a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 897a5ce9c9da..dcc49b01bd59 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -100,7 +100,6 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
- int vcn_doorbell_index = 0;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -112,12 +111,6 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
- if (amdgpu_sriov_vf(adev)) {
- vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
- /* get DWORD offset */
- vcn_doorbell_index = vcn_doorbell_index << 1;
- }
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -135,7 +128,7 @@ static int vcn_v4_0_sw_init(void *handle)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 6925e0280dbe..f4f3d2665a6b 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -5,6 +5,7 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
+ depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
select SND_HDA_COMPONENT if SND_HDA_CORE
select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128)
help
@@ -12,6 +13,12 @@ config DRM_AMD_DC
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
+ calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64)
+ architectures built with Clang (all released versions), whereby the stack
+ frame gets blown up to well over 5k. This would cause an immediate kernel
+ panic on most architectures. We'll revert this when the following bug report
+ has been resolved: https://github.com/llvm/llvm-project/issues/41896.
+
config DRM_AMD_DC_DCN
def_bool n
help
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 509739d83b5a..512c32327eb1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,6 +147,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
+/*
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
+#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
+
/**
* DOC: overview
*
@@ -1364,7 +1372,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ /* TODO: refactor this from a fixed table to a dynamic option */
{}
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
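Editor's note: the new OptiPlex entries extend an ordinary dmi_system_id table terminated by an empty sentinel. As a reminder of how such a table is consumed, here is a minimal sketch using dmi_first_match(); the table name and wrapper function are hypothetical and not part of the driver:

/* Sketch of dmi_system_id matching; names here are illustrative only. */
#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
            DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
        },
    },
    {}  /* empty sentinel terminates the table */
};

static bool example_needs_hpd_quirk(void)
{
    /* dmi_first_match() returns the matching entry or NULL */
    return dmi_first_match(example_quirks) != NULL;
}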
@@ -1637,12 +1682,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
}
- if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
- goto error;
- }
-
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
@@ -1650,6 +1689,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc))
dc_enable_dmub_outbox(adev->dm.dc);
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -6467,7 +6512,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j;
+ int i, j, ret;
int vcpi, pbn_div, pbn, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6514,8 +6559,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
- false);
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
continue;
}
@@ -9529,10 +9577,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
goto fail;
- }
}
#endif
@@ -9627,9 +9674,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
goto fail;
}
@@ -10109,6 +10156,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
*operation_result = AUX_RET_ERROR_TIMEOUT;
} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
} else {
*operation_result = AUX_RET_ERROR_UNKNOWN;
}
@@ -10156,6 +10205,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+
+ if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
+ DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
+ payload->address, payload->length,
+ adev->dm.dmub_notify->aux_reply.length);
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
+ DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
+ (uint32_t *)operation_result);
+ }
+
memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
adev->dm.dmub_notify->aux_reply.length);
}
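Editor's note: the added hunk refuses a DPIA AUX reply whose length disagrees with the request before copying it, and maps that case onto the new DMUB_ASYNC_TO_SYNC_ACCESS_INVALID status. A reduced, self-contained sketch of the same guard-then-copy shape, with hypothetical types rather than the driver's:

/* Reduced sketch of the reply-length guard; struct and names are invented. */
#include <string.h>

struct aux_reply { unsigned int length; unsigned char data[16]; };

static int copy_aux_reply(unsigned char *dst, unsigned int requested_len,
                          const struct aux_reply *reply)
{
    /* Reject replies whose length disagrees with what was requested */
    if (requested_len != reply->length)
        return -1;  /* caller reports an "invalid reply" status */

    memcpy(dst, reply->data, reply->length);
    return 0;
}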
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b5ce15c43bcc..635c398fcefe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -51,12 +51,6 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 594fe8a4d02b..64dd02970292 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -412,7 +412,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
-
+ bool is_dcn;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -450,8 +450,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
acrtc->otg_inst = -1;
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+
+ /* Don't enable DRM CRTC degamma property for DCE since it doesn't
+ * support programmable degamma anywhere.
+ */
+ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
+ drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
+
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6ff96b4bdda5..6483ba266893 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -703,13 +703,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
return dsc_config.bits_per_pixel;
}
-static bool increase_dsc_bpp(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_state *mst_state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_params *params,
- struct dsc_mst_fairness_vars *vars,
- int count,
- int k)
+static int increase_dsc_bpp(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count,
+ int k)
{
int i;
bool bpp_increased[MAX_PIPES];
@@ -719,6 +719,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
int remaining_to_increase = 0;
int link_timeslots_used;
int fair_pbn_alloc;
+ int ret = 0;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
@@ -757,52 +758,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
} else {
vars[next_index].pbn -= fair_pbn_alloc;
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
} else {
vars[next_index].pbn += initial_slack[next_index];
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
} else {
vars[next_index].pbn -= initial_slack[next_index];
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
}
bpp_increased[next_index] = true;
remaining_to_increase--;
}
- return true;
+ return 0;
}
-static bool try_disable_dsc(struct drm_atomic_state *state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_params *params,
- struct dsc_mst_fairness_vars *vars,
- int count,
- int k)
+static int try_disable_dsc(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count,
+ int k)
{
int i;
bool tried[MAX_PIPES];
@@ -810,6 +819,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
int max_kbps_increase;
int next_index;
int remaining_to_try = 0;
+ int ret;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -840,49 +850,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
tried[next_index] = true;
remaining_to_try--;
}
- return true;
+ return 0;
}
-static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_vars *vars,
- struct drm_dp_mst_topology_mgr *mgr,
- int *link_vars_start_index)
+static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_vars *vars,
+ struct drm_dp_mst_topology_mgr *mgr,
+ int *link_vars_start_index)
{
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
int count = 0;
- int i, k;
+ int i, k, ret;
bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
if (IS_ERR(mst_state))
- return false;
+ return PTR_ERR(mst_state);
mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -933,7 +946,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (count == 0) {
ASSERT(0);
- return true;
+ return 0;
}
/* k is start index of vars for current phy link used by mst hub */
@@ -947,13 +960,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
- vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+ vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
}
- if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0 && !debugfs_overwrite) {
set_dsc_configs_from_fairness_vars(params, vars, count, k);
- return true;
+ return 0;
+ } else if (ret != -ENOSPC) {
+ return ret;
}
/* Try max compression */
@@ -962,31 +979,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
- params[i].port, vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
- params[i].port, vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
}
}
- if (drm_dp_mst_atomic_check(state))
- return false;
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret != 0)
+ return ret;
/* Optimize degree of compression */
- if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
- return false;
+ ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
+ if (ret < 0)
+ return ret;
- if (!try_disable_dsc(state, dc_link, params, vars, count, k))
- return false;
+ ret = try_disable_dsc(state, dc_link, params, vars, count, k);
+ if (ret < 0)
+ return ret;
set_dsc_configs_from_fairness_vars(params, vars, count, k);
- return true;
+ return 0;
}
static bool is_dsc_need_re_compute(
@@ -1087,15 +1109,17 @@ static bool is_dsc_need_re_compute(
return is_dsc_need_re_compute;
}
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars)
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
+ int ret = 0;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@@ -1108,7 +1132,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1118,19 +1142,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
continue;
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
- return false;
+ return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &aconnector->mst_mgr,
- &link_vars_start_index)) {
- mutex_unlock(&aconnector->mst_mgr.lock);
- return false;
- }
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@@ -1143,22 +1164,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (stream->timing.flags.DSC == 1)
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
- return false;
+ return -EINVAL;
}
- return true;
+ return ret;
}
-static bool
- pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars)
+static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
+ int ret = 0;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@@ -1171,7 +1193,7 @@ static bool
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1183,14 +1205,11 @@ static bool
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &aconnector->mst_mgr,
- &link_vars_start_index)) {
- mutex_unlock(&aconnector->mst_mgr.lock);
- return false;
- }
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@@ -1198,7 +1217,7 @@ static bool
}
}
- return true;
+ return ret;
}
static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
@@ -1253,9 +1272,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
return ret;
}
-bool pre_validate_dsc(struct drm_atomic_state *state,
- struct dm_atomic_state **dm_state_ptr,
- struct dsc_mst_fairness_vars *vars)
+int pre_validate_dsc(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state_ptr,
+ struct dsc_mst_fairness_vars *vars)
{
int i;
struct dm_atomic_state *dm_state;
@@ -1264,11 +1283,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (!is_dsc_precompute_needed(state)) {
DRM_INFO_ONCE("DSC precompute is not needed.\n");
- return true;
+ return 0;
}
- if (dm_atomic_get_state(state, dm_state_ptr)) {
+ ret = dm_atomic_get_state(state, dm_state_ptr);
+ if (ret != 0) {
DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
- return false;
+ return ret;
}
dm_state = *dm_state_ptr;
@@ -1280,7 +1300,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
if (!local_dc_state)
- return false;
+ return -ENOMEM;
for (i = 0; i < local_dc_state->stream_count; i++) {
struct dc_stream_state *stream = dm_state->context->streams[i];
@@ -1316,9 +1336,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (ret != 0)
goto clean_exit;
- if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
+ ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+ if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
goto clean_exit;
}
@@ -1349,7 +1369,7 @@ clean_exit:
kfree(local_dc_state);
- return (ret == 0);
+ return ret;
}
static unsigned int kbps_from_pbn(unsigned int pbn)
@@ -1392,6 +1412,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
+ struct drm_dp_mst_topology_mgr *mst_mgr;
/*
* check if the mode could be supported if DSC pass-through is supported
@@ -1400,7 +1421,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
aconnector->port->passthrough_aux) {
- mutex_lock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ mutex_lock(&mst_mgr->lock);
cur_link_settings = stream->link->verified_link_cap;
@@ -1413,7 +1435,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mutex_unlock(&mst_mgr->lock);
/*
* use the maximum dsc compression bandwidth as the required
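Editor's note: the MST DSC helpers above are converted from bool to int so the errno coming out of drm_dp_atomic_find_time_slots() / drm_dp_mst_atomic_check() reaches the atomic-check caller instead of being collapsed into a generic failure; -ENOSPC in particular becomes distinguishable. A generic, runnable sketch of that propagation pattern, using stand-in helpers rather than the DRM API:

/* Generic sketch of the bool -> negative-errno conversion done above. */
#include <errno.h>
#include <stdio.h>

static int try_allocate(int want)
{
    return want > 100 ? -ENOSPC : 0;  /* stand-in for the DRM helpers */
}

static int configure_link(int want)
{
    int ret = try_allocate(want);

    if (ret < 0)
        return ret;  /* hand the real errno up instead of a bool */
    return 0;
}

int main(void)
{
    int wants[] = { 40, 150, 60 };
    int ret = 0;

    for (int i = 0; i < 3 && ret == 0; i++) {
        ret = configure_link(wants[i]);
        if (ret == -ENOSPC) {
            printf("link %d: no space, would retry with DSC\n", i);
            ret = 0;  /* -ENOSPC is now distinguishable from other errors */
        }
    }
    return ret < 0 ? 1 : 0;
}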
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index b92a7c5671aa..97fd70df531b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars {
struct amdgpu_dm_connector *aconnector;
};
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars);
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars);
bool needs_dsc_aux_workaround(struct dc_link *link);
-bool pre_validate_dsc(struct drm_atomic_state *state,
- struct dm_atomic_state **dm_state_ptr,
- struct dsc_mst_fairness_vars *vars);
+int pre_validate_dsc(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state_ptr,
+ struct dsc_mst_fairness_vars *vars);
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ee0456b5e14e..e0c8d6f09bb4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2393,6 +2393,26 @@ static enum bp_result get_vram_info_v25(
return result;
}
+static enum bp_result get_vram_info_v30(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v3_0 *info_v30;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0,
+ DATA_TABLES(vram_info));
+
+ if (info_v30 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v30->channel_num;
+ info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
+
+ return result;
+}
+
+
/*
* get_integrated_info_v11
*
@@ -3060,6 +3080,16 @@ static enum bp_result bios_parser_get_vram_info(
}
break;
+ case 3:
+ switch (revision.minor) {
+ case 0:
+ result = get_vram_info_v30(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
default:
return result;
}
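Editor's note: get_vram_info_v30() derives the per-channel width in bytes from the channel_width field. Assuming that field is a log2 bit count (that encoding is an assumption here, not stated in the patch), the arithmetic works out as in this small example:

/* Worked example of the v3.0 width decode; the log2 encoding is assumed. */
#include <stdio.h>

int main(void)
{
    unsigned int channel_width = 5;  /* assumed: log2(bits) = 5 -> 32 bits */
    unsigned int width_bytes = (1u << channel_width) / 8;

    printf("channel width: %u bits = %u bytes\n",
           1u << channel_width, width_bytes);  /* prints 32 bits = 4 bytes */
    return 0;
}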
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index ef0795b14a1f..2db595672a46 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
uint32_t result;
result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK);
- smu_print("SMU response after wait: %d\n", result);
+ if (result != VBIOSSMC_Result_OK)
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n",
+ result);
if (result == VBIOSSMC_Status_BUSY)
return -1;
@@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
+#ifdef DBG
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
+ actual_dcfclk_set_mhz,
+ actual_dcfclk_set_mhz * 1000);
+#endif
+
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 1b70b78e2fa1..af631085e88c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -359,7 +359,8 @@ static const struct dce_audio_registers audio_regs[] = {
audio_regs(2),
audio_regs(3),
audio_regs(4),
- audio_regs(5)
+ audio_regs(5),
+ audio_regs(6),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index b9765b3899e1..ef52e6b6eccf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -436,34 +436,48 @@ void dpp1_set_cursor_position(
uint32_t height)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_pos = pos->x - param->viewport.x;
+ int y_pos = pos->y - param->viewport.y;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ int src_x_offset = x_pos - pos->x_hotspot;
+ int src_y_offset = y_pos - pos->y_hotspot;
+ int cursor_height = (int)height;
+ int cursor_width = (int)width;
uint32_t cur_en = pos->enable ? 1 : 0;
- // Cursor width/height and hotspots need to be rotated for offset calculation
+ // Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(width, height);
+ swap(cursor_height, cursor_width);
+ swap(x_hotspot, y_hotspot);
+
if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ // hotspot = (-y, x)
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
+ src_y_offset = y_pos - y_hotspot;
+ } else if (param->rotation == ROTATION_ANGLE_270) {
+ // hotspot = (y, -x)
+ src_x_offset = x_pos - x_hotspot;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
+ // hotspot = (-x, -y)
if (!param->mirror)
- src_x_offset = pos->x - param->viewport.x;
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
- src_y_offset = pos->y - param->viewport.y;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)width <= 0)
+ if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset + (int)height <= 0)
+ if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,
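Editor's note: the rewritten cursor-position code first swaps the cursor dimensions and hotspots for 90/270-degree rotation and only then subtracts, so the hotspot is measured inside the rotated bitmap (the "hotspot = (-y, x)" comments). A standalone sketch of that arithmetic with made-up numbers, mirroring the 90-degree branch:

/* Worked example of the rotated-cursor offset math used above. */
#include <stdio.h>

static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

int main(void)
{
    int x_pos = 200, y_pos = 100;        /* cursor position in the viewport */
    int x_hotspot = 4, y_hotspot = 12;   /* hotspot inside the cursor bitmap */
    int cursor_width = 64, cursor_height = 32;
    int rotated_90 = 1;                  /* pretend the plane is rotated 90 deg */
    int src_x_offset, src_y_offset;

    if (rotated_90) {
        /* Width/height and hotspots trade places for a 90/270 rotation */
        swap_int(&cursor_width, &cursor_height);
        swap_int(&x_hotspot, &y_hotspot);

        /* 90 degrees: hotspot maps to (-y, x) */
        src_x_offset = x_pos - (cursor_width - x_hotspot);
        src_y_offset = y_pos - y_hotspot;
    } else {
        src_x_offset = x_pos - x_hotspot;
        src_y_offset = y_pos - y_hotspot;
    }

    printf("src offset = (%d, %d)\n", src_x_offset, src_y_offset);
    return 0;
}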
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 52e201e9b091..a142a00bc432 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1179,10 +1179,12 @@ void hubp1_cursor_set_position(
const struct dc_cursor_mi_param *param)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_pos = pos->x - param->viewport.x;
+ int y_pos = pos->y - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
+ int src_x_offset = x_pos - pos->x_hotspot;
+ int src_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)hubp->curs_attr.height;
int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
@@ -1200,18 +1202,26 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
- // Rotated cursor width/height and hotspots tweaks for offset calculation
+ // Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
+ swap(x_hotspot, y_hotspot);
+
if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ // hotspot = (-y, x)
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
+ src_y_offset = y_pos - y_hotspot;
+ } else if (param->rotation == ROTATION_ANGLE_270) {
+ // hotspot = (y, -x)
+ src_x_offset = x_pos - x_hotspot;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
+ // hotspot = (-x, -y)
if (!param->mirror)
- src_x_offset = pos->x - param->viewport.x;
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
- src_y_offset = pos->y - param->viewport.y;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1248,8 +1258,8 @@ void hubp1_cursor_set_position(
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, x_hotspot,
- CURSOR_HOT_SPOT_Y, y_hotspot);
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 938dba5249d4..4566bc7abf17 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -973,10 +973,12 @@ void hubp2_cursor_set_position(
const struct dc_cursor_mi_param *param)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_pos = pos->x - param->viewport.x;
+ int y_pos = pos->y - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
+ int src_x_offset = x_pos - pos->x_hotspot;
+ int src_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)hubp->curs_attr.height;
int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
@@ -994,18 +996,26 @@ void hubp2_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
- // Rotated cursor width/height and hotspots tweaks for offset calculation
+ // Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
+ swap(x_hotspot, y_hotspot);
+
if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ // hotspot = (-y, x)
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
+ src_y_offset = y_pos - y_hotspot;
+ } else if (param->rotation == ROTATION_ANGLE_270) {
+ // hotspot = (y, -x)
+ src_x_offset = x_pos - x_hotspot;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
+ // hotspot = (-x, -y)
if (!param->mirror)
- src_x_offset = pos->x - param->viewport.x;
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
- src_y_offset = pos->y - param->viewport.y;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1042,8 +1052,8 @@ void hubp2_cursor_set_position(
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, x_hotspot,
- CURSOR_HOT_SPOT_Y, y_hotspot);
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1052,8 +1062,8 @@ void hubp2_cursor_set_position(
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
hubp->pos.position.bits.y_pos = pos->y;
- hubp->pos.hot_spot.bits.x_hot = x_hotspot;
- hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot;
hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
/* Cursor Rectangle Cache
* Cursor bitmaps have different hotspot values
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 84e1486f3d51..39a57bcd7866 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 1bd7e0f327d8..389a8938ee45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -96,6 +96,13 @@ static void dccg314_set_pixel_rate_div(
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+ // Don't program 0xF into the register field. Not valid since
+ // K1 / K2 field is only 1 / 2 bits wide
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 588c1c71241f..a0741794db62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -348,10 +348,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
- return odm_combine_factor;
-
if (is_dp_128b_132b_signal(pipe_ctx)) {
+ *k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
@@ -359,7 +357,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 47eb162f1a75..7dd36e402bac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -237,7 +237,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc2_configure_crc,
+ .configure_crc = optc1_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index e4daed44ef5f..df4f25119142 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -96,8 +96,10 @@ static void dccg32_set_pixel_rate_div(
// Don't program 0xF into the register field. Not valid since
// K1 / K2 field is only 1 / 2 bits wide
- if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+ BREAK_TO_DEBUGGER();
return;
+ }
dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
if (k1 == cur_k1 && k2 == cur_k2)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index cf5bd9713f54..d0b46a3e0155 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
using the max for calculation */
if (hubp->curs_attr.width > 0) {
- // Round cursor width to next multiple of 64
- cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+ cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (pipe->stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
@@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
cursor_size > 16384) {
/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
*/
- cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
- DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+ DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
+ dc->caps.cache_line_size + 2;
}
break;
}
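Editor's note: the CAB sizing above now uses the cursor pitch rather than a width rounded up to 64, then rounds the byte size up to whole MALL blocks before converting to cache lines. A toy version of that rounding; the block and cache-line sizes below are assumptions, not values taken from the headers:

/* Toy version of the MALL-block rounding; the constants are assumed. */
#include <stdio.h>

#define MBLK_BYTES        65536u  /* assumed DCN3_2_MALL_MBLK_SIZE_BYTES */
#define CACHE_LINE_BYTES  64u     /* assumed cache line size */

int main(void)
{
    unsigned int pitch = 256, height = 177, bytes_per_pixel = 4;
    unsigned int cursor_bytes = pitch * height * bytes_per_pixel;

    /* Round up to whole MALL blocks, then express as cache lines (+2 slack) */
    unsigned int blocks = (cursor_bytes + MBLK_BYTES - 1) / MBLK_BYTES;
    unsigned int cache_lines = blocks * MBLK_BYTES / CACHE_LINE_BYTES + 2;

    printf("%u cursor bytes -> %u blocks -> %u cache lines\n",
           cursor_bytes, blocks, cache_lines);
    return 0;
}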
@@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- //Round cursor width up to next multiple of 64
- int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
- int cursor_height = hubp->curs_attr.height;
- int cursor_size = cursor_width * cursor_height;
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (hubp->curs_attr.color_format) {
case CURSOR_MODE_MONO:
@@ -1175,10 +1171,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
- return odm_combine_factor;
-
if (is_dp_128b_132b_signal(pipe_ctx)) {
+ *k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b03a7814e96d..fa3778849db1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -111,7 +111,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
/* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
- mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
mblk_height * mblk_height + mblk_height;
/* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 659323ebd79d..2abe3967f7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -157,7 +157,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.dispclk_dppclk_vco_speed_mhz = 4300.0,
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
@@ -211,7 +211,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
@@ -221,7 +221,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
+ clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
@@ -1803,6 +1803,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
+ * prefetch is scheduled correctly to account for dummy pstate.
+ */
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
@@ -1904,7 +1910,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
dm_dram_clock_change_unsupported) {
- int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
+ int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;
min_dram_speed_mts =
dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
@@ -1990,6 +1996,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+
dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
@@ -1997,8 +2007,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
+ }
}
static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 244fd15d24b4..9afd9ba23fb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -718,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
do {
MaxTotalRDBandwidth = 0;
+ DestinationLineTimesForPrefetchLessThan2 = false;
+ VRatioPrefetchMoreThanMax = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index f82e14cd9d8a..c8b28c83ddf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -46,6 +46,8 @@
// Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE__ 4.0
+#define __DML_VBA_MAX_DST_Y_PRE__ 63.75
+
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 635fc54338fa..debe46b24a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3475,7 +3475,6 @@ bool dml32_CalculatePrefetchSchedule(
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
- double TPreMargin = 0;
if (v->GPUVMEnable == true && v->HostVMEnable == true)
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
@@ -3669,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
@@ -3701,8 +3701,6 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
-
- TPreMargin = Tpre_rounded - TPreReq;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
@@ -3730,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (dst_y_prefetch_equ > 1 && TPreMargin > 0.0) {
+ if (dst_y_prefetch_equ > 1 &&
+ (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
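Editor's note: the clamp caps dst_y_prefetch_equ at __DML_VBA_MAX_DST_Y_PRE__ (63.75), which is the largest value the later 0.25-line quantization can express if the destination field is an unsigned 6.2 fixed-point quantity (that register width is an assumption here, not stated in the patch); the acceptance test is then relaxed so that hitting the cap still counts as a usable prefetch. A tiny numeric check of the clamp plus quantization:

/* Numeric check of the 0.25-line quantization and the 63.75 cap. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double max_dst_y_pre = 63.75;  /* __DML_VBA_MAX_DST_Y_PRE__ */
    double samples[] = { 12.3, 63.9, 200.0 };

    for (int i = 0; i < 3; i++) {
        double v = samples[i];

        if (v > max_dst_y_pre)
            v = max_dst_y_pre;  /* the dml_min() clamp added above */

        /* Same rounding as dml_floor(4*(v+0.125),1)/4: nearest 0.25 line */
        double q = floor(4.0 * (v + 0.125)) / 4.0;

        printf("%6.2f -> clamped %6.2f -> quantized %6.2f\n",
               samples[i], v, q);
    }
    return 0;
}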
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 432b4ecd01a7..f4b176599be7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -126,9 +126,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
.round_trip_ping_latency_dcfclk_cycles = 263,
- .urgent_latency_pixel_data_only_us = 9.35,
- .urgent_latency_pixel_mixed_with_vm_data_us = 9.35,
- .urgent_latency_vm_data_only_us = 9.35,
+ .urgent_latency_pixel_data_only_us = 4,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4,
+ .urgent_latency_vm_data_only_us = 4,
.fclk_change_latency_us = 20,
.usr_retraining_latency_us = 2,
.smn_latency_us = 2,
@@ -156,7 +156,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.dispclk_dppclk_vco_speed_mhz = 4300.0,
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 4fe75dd2b329..b880f4d7d67e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint64_t features_supported;
int ret = 0;
- if (adev->in_suspend && smu_is_dpm_running(smu)) {
- dev_info(adev->dev, "dpm has been enabled\n");
- /* this is needed specifically */
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 11):
- case IP_VERSION(11, 5, 0):
- case IP_VERSION(11, 0, 12):
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ if (adev->in_suspend && smu_is_dpm_running(smu)) {
+ dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
if (ret)
dev_err(adev->dev, "Failed system features control!\n");
- break;
- default:
- break;
+ return ret;
}
- return ret;
+ break;
+ default:
+ break;
}
ret = smu_init_display_count(smu, 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index e2fa3b066b96..f816b1dd110e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1388,6 +1388,14 @@ enum smu_cmn2asic_mapping_type {
CMN2ASIC_MAPPING_WORKLOAD,
};
+enum smu_baco_seq {
+ BACO_SEQ_BACO = 0,
+ BACO_SEQ_MSR,
+ BACO_SEQ_BAMACO,
+ BACO_SEQ_ULPS,
+ BACO_SEQ_COUNT,
+};
+
#define MSG_MAP(msg, index, valid_in_vf) \
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index 25c08f963f49..d6b13933a98f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -25,10 +25,10 @@
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x2C
+#define SMU13_DRIVER_IF_VERSION 0x35
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x20
+#define PPTABLE_VERSION 0x27
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -96,7 +96,7 @@
#define FEATURE_MEM_TEMP_READ_BIT 47
#define FEATURE_ATHUB_MMHUB_PG_BIT 48
#define FEATURE_SOC_PCC_BIT 49
-#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_EDC_PWRBRK_BIT 50
#define FEATURE_SPARE_51_BIT 51
#define FEATURE_SPARE_52_BIT 52
#define FEATURE_SPARE_53_BIT 53
@@ -282,15 +282,15 @@ typedef enum {
} I2cControllerPort_e;
typedef enum {
- I2C_CONTROLLER_NAME_VR_GFX = 0,
- I2C_CONTROLLER_NAME_VR_SOC,
- I2C_CONTROLLER_NAME_VR_VMEMP,
- I2C_CONTROLLER_NAME_VR_VDDIO,
- I2C_CONTROLLER_NAME_LIQUID0,
- I2C_CONTROLLER_NAME_LIQUID1,
- I2C_CONTROLLER_NAME_PLX,
- I2C_CONTROLLER_NAME_OTHER,
- I2C_CONTROLLER_NAME_COUNT,
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_VMEMP,
+ I2C_CONTROLLER_NAME_VR_VDDIO,
+ I2C_CONTROLLER_NAME_LIQUID0,
+ I2C_CONTROLLER_NAME_LIQUID1,
+ I2C_CONTROLLER_NAME_PLX,
+ I2C_CONTROLLER_NAME_FAN_INTAKE,
+ I2C_CONTROLLER_NAME_COUNT,
} I2cControllerName_e;
typedef enum {
@@ -302,6 +302,7 @@ typedef enum {
I2C_CONTROLLER_THROTTLER_LIQUID0,
I2C_CONTROLLER_THROTTLER_LIQUID1,
I2C_CONTROLLER_THROTTLER_PLX,
+ I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
I2C_CONTROLLER_THROTTLER_INA3221,
I2C_CONTROLLER_THROTTLER_COUNT,
} I2cControllerThrottler_e;
@@ -309,8 +310,9 @@ typedef enum {
typedef enum {
I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
- I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
I2C_CONTROLLER_PROTOCOL_INA3221,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
I2C_CONTROLLER_PROTOCOL_COUNT,
} I2cControllerProtocol_e;
@@ -690,6 +692,9 @@ typedef struct {
#define PP_OD_FEATURE_UCLK_BIT 8
#define PP_OD_FEATURE_ZERO_FAN_BIT 9
#define PP_OD_FEATURE_TEMPERATURE_BIT 10
+#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11
+#define PP_OD_FEATURE_ASIC_TDC_BIT 12
+#define PP_OD_FEATURE_COUNT 13
typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
@@ -697,6 +702,11 @@ typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
} PP_OD_POWER_FEATURE_e;
+typedef enum {
+ FAN_MODE_AUTO = 0,
+ FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
typedef struct {
uint32_t FeatureCtrlMask;
@@ -708,8 +718,8 @@ typedef struct {
uint8_t RuntimePwrSavingFeaturesCtrl;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -730,7 +740,12 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t Padding[4];
- uint32_t Spare[12];
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ int16_t AsicTdc;
+
+ uint32_t Spare[10];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -748,8 +763,8 @@ typedef struct {
uint8_t IdlePwrSavingFeaturesCtrl;
uint8_t RuntimePwrSavingFeaturesCtrl;
- uint16_t GfxclkFmin; // MHz
- uint16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -769,7 +784,12 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t Padding[4];
- uint32_t Spare[12];
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ int16_t AsicTdc;
+
+ uint32_t Spare[10];
} OverDriveLimits_t;
@@ -903,7 +923,8 @@ typedef struct {
uint16_t FanStartTempMin;
uint16_t FanStartTempMax;
- uint32_t Spare[12];
+ uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
+ uint32_t Spare[11];
} MsgLimits_t;
@@ -1086,11 +1107,13 @@ typedef struct {
uint32_t GfxoffSpare[15];
// GFX GPO
- float DfllBtcMasterScalerM;
+ uint32_t DfllBtcMasterScalerM;
int32_t DfllBtcMasterScalerB;
- float DfllBtcSlaveScalerM;
+ uint32_t DfllBtcSlaveScalerM;
int32_t DfllBtcSlaveScalerB;
- uint32_t GfxGpoSpare[12];
+ uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+ uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+ uint32_t GfxGpoSpare[10];
// GFX DCS
@@ -1106,7 +1129,10 @@ typedef struct {
uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
- uint32_t DcsSpare[16];
+ uint32_t DcsSpare[14];
+
+ // UCLK section
+ uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz
// UCLK section
uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
@@ -1163,13 +1189,14 @@ typedef struct {
uint16_t IntakeTempHighIntakeAcousticLimit;
uint16_t IntakeTempAcouticLimitReleaseRate;
- uint16_t FanStalledTempLimitOffset;
+ int16_t FanAbnormalTempLimitOffset;
uint16_t FanStalledTriggerRpm;
- uint16_t FanAbnormalTriggerRpm;
- uint16_t FanPadding;
-
- uint32_t FanSpare[14];
+ uint16_t FanAbnormalTriggerRpmCoeff;
+ uint16_t FanAbnormalDetectionEnable;
+ uint8_t FanIntakeSensorSupport;
+ uint8_t FanIntakePadding[3];
+ uint32_t FanSpare[13];
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
@@ -1193,7 +1220,6 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
uint32_t V2F_vmin_range_low;
uint32_t V2F_vmin_range_high;
uint32_t V2F_vmax_range_low;
@@ -1238,8 +1264,21 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
+ QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[43];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1304,7 +1343,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent; // Q4.4
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1317,11 +1357,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1423,8 +1459,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t AvgTemperatureFanIntake;
uint8_t PcieRate ;
uint8_t PcieWidth ;
@@ -1592,5 +1631,7 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index a9215494dcdd..d466db6f0ad4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -147,14 +147,6 @@ struct smu_11_5_power_context {
uint32_t max_fast_ppt_limit;
};
-enum smu_v11_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v11_0_init_microcode(struct smu_context *smu);
@@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu);
int smu_v11_0_baco_exit(struct smu_context *smu);
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq);
+ enum smu_baco_seq baco_seq);
int smu_v11_0_mode1_reset(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 80fb583b18d9..865d6358918d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -31,7 +31,7 @@
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -124,14 +124,6 @@ struct smu_13_0_power_context {
enum smu_13_0_power_state power_state;
};
-enum smu_v13_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v13_0_init_microcode(struct smu_context *smu);
@@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
bool smu_v13_0_baco_is_support(struct smu_context *smu);
enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 6212fd270857..697e98a0a20a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -379,6 +379,10 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
((adev->pdev->device == 0x73BF) &&
(adev->pdev->revision == 0xCF)) ||
((adev->pdev->device == 0x7422) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73A3) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73E3) &&
(adev->pdev->revision == 0x00)))
smu_baco->platform_support = false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index dccbd9f70723..70b560737687 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
}
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq)
+ enum smu_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 43fb102a65f5..89f0f6eb19f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2230,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
return ret;
}
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+}
+
bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 29529328152d..f0121d171630 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1566,6 +1567,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_0_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_0_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1827,8 +1853,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_0_baco_enter,
+ .baco_exit = smu_v13_0_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c4102cfb734c..d74debc584f8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -122,6 +122,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1578,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_7_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1655,8 +1681,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_7_baco_enter,
+ .baco_exit = smu_v13_0_7_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index 3ea53bb67d3b..bd61e20770a5 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -63,23 +63,45 @@
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
+ u8 zero = 0;
+ char *tmpbuf = NULL;
+ /*
+ * As sub-addressing is not supported by all adaptors,
+ * always explicitly read from the start and discard
+ * any bytes that come before the requested offset.
+ * This way, no matter whether the adaptor supports it
+ * or not, we'll end up reading the proper data.
+ */
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
- .buf = &offset,
+ .buf = &zero,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
- .len = size,
+ .len = size + offset,
.buf = buffer,
},
};
int ret;
+ if (offset) {
+ tmpbuf = kmalloc(size + offset, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ msgs[1].buf = tmpbuf;
+ }
+
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (tmpbuf)
+ memcpy(buffer, tmpbuf + offset, size);
+
+ kfree(tmpbuf);
+
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
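
The workaround in this hunk boils down to a small helper: when the adaptor may ignore the sub-address, read offset + size bytes from register 0 into a bounce buffer and copy out only the window the caller asked for. A minimal sketch of that shape, with read_from_start() standing in for the raw I2C transfer (the helper and its callback are illustrative, not part of the driver):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static int read_at_offset(int (*read_from_start)(void *buf, size_t len),
				  u8 offset, void *buffer, size_t size)
	{
		u8 *tmp;
		int ret;

		if (!offset)
			return read_from_start(buffer, size);

		tmp = kmalloc(size + offset, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = read_from_start(tmp, size + offset);
		if (!ret)
			memcpy(buffer, tmp + offset, size);	/* keep only the requested window */

		kfree(tmp);
		return ret;
	}
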
@@ -208,18 +230,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
- /*
- * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
- * the offset but ignore it, and instead they just always return
- * data from the start of the HDMI ID buffer. So for a broken
- * type 1 HDMI adaptor a single byte read will always give us
- * 0x44, and for a type 1 DVI adaptor it should give 0x00
- * (assuming it implements any registers). Fortunately neither
- * of those values will match the type 2 signature of the
- * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
- * the type 2 adaptor detection safely even in the presence
- * of broken type 1 adaptors.
- */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
@@ -233,11 +243,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
/*
- * If neither a proper type 1 ID nor a broken type 1 adaptor
- * as described above, assume type 1, but let the user know
- * that we may have misdetected the type.
+ * If not a proper type 1 ID, still assume type 1, but let
+ * the user know that we may have misdetected the type.
*/
- if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ if (!is_type1_adaptor(adaptor_id))
drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
}
@@ -343,10 +352,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
- * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
- * some type 1 adaptors have problems with registers (see comments
- * in drm_dp_dual_mode_detect()) we avoid touching the register,
- * making this function a no-op on type 1 adaptors.
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register.
+ * Type1 adaptors do not support any register writes.
*
* Returns:
* 0 on success, negative error code on failure
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ecd22c038c8c..51a46689cda7 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5186,7 +5186,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
- return -EINVAL;
+ return PTR_ERR(mst_state);
list_for_each_entry(pos, &mst_state->payloads, next) {
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8214a0b1ab7f..203bf8d6c34c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -615,7 +615,7 @@ static int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
- ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
return ret;
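
The switch to drmm_add_action_or_reset() matters because plain drmm_add_action() leaves the just-initialized state dangling if allocating the action itself fails; the _or_reset() variant runs the release callback immediately in that case. A minimal sketch under that assumption (my_state_release() and my_dev_init() are placeholder names):

	#include <drm/drm_drv.h>
	#include <drm/drm_managed.h>

	static void my_state_release(struct drm_device *dev, void *ptr)
	{
		/* undo whatever my_dev_init() set up, e.g. mutex_destroy() */
	}

	static int my_dev_init(struct drm_device *dev)
	{
		/* ... init work that my_state_release() must undo ... */

		/*
		 * On failure, _or_reset() invokes my_state_release() right away,
		 * so the init work above is never leaked.
		 */
		return drmm_add_action_or_reset(dev, my_state_release, NULL);
	}
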
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 7bb98e6a446d..5ea5e260118c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -104,7 +104,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
{
- kthread_destroy_worker(vblank->worker);
+ if (vblank->worker)
+ kthread_destroy_worker(vblank->worker);
}
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 939d621c9ad4..688c8afe0bf1 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
- continue;
-
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 461c62c88413..de77054195c6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3723,12 +3723,16 @@ out:
static u8 bigjoiner_pipes(struct drm_i915_private *i915)
{
+ u8 pipes;
+
if (DISPLAY_VER(i915) >= 12)
- return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+ pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
else if (DISPLAY_VER(i915) >= 11)
- return BIT(PIPE_B) | BIT(PIPE_C);
+ pipes = BIT(PIPE_B) | BIT(PIPE_C);
else
- return 0;
+ pipes = 0;
+
+ return pipes & RUNTIME_INFO(i915)->pipe_mask;
}
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1e608b9e5055..1a63da28f330 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -2434,7 +2434,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
+ if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start);
@@ -2445,7 +2445,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
+ if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start);
@@ -2471,7 +2471,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
+ if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2482,7 +2482,7 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
+ if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index a4aa9500fa17..0d6d640225fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -612,6 +612,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+ err = ttm_bo_wait(bo, true, false);
+ if (err)
+ return err;
+
err = i915_ttm_move_notify(bo);
if (err)
return err;
@@ -1013,9 +1017,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (i915_ttm_cpu_maps_iomem(bo->resource))
- wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1042,6 +1043,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d0b03a928b9a..7caa3412a244 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -625,8 +625,13 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
return -EINTR;
}
- return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
- remaining_timeout);
+ if (timeout)
+ return timeout;
+
+ if (remaining_timeout < 0)
+ remaining_timeout = 0;
+
+ return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}
int intel_gt_init(struct intel_gt *gt)
@@ -1017,6 +1022,11 @@ static void mmio_invalidate_full(struct intel_gt *gt)
if (!i915_mmio_reg_offset(rb.reg))
continue;
+ if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS))
+ rb.bit = _MASKED_BIT_ENABLE(rb.bit);
+
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
awake |= engine->mask;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index edb881d75630..1dfd01668c79 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -199,7 +199,7 @@ out_active: spin_lock(&timelines->lock);
if (remaining_timeout)
*remaining_timeout = timeout;
- return active_count ? timeout : 0;
+ return active_count ? timeout ?: -ETIME : 0;
}
static void retire_work_handler(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 7a45e5360caf..714221f9a131 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -664,8 +664,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
return -ESRCH;
}
- kvm_get_kvm(vgpu->vfio_device.kvm);
-
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
@@ -676,6 +674,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_get_kvm(vgpu->vfio_device.kvm);
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
@@ -715,15 +714,14 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
+ kvm_put_kvm(vgpu->vfio_device.kvm);
+
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
vgpu->attached = false;
-
- if (vgpu->vfio_device.kvm)
- kvm_put_kvm(vgpu->vfio_device.kvm);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 2403ccd52c74..bba8cb6e8ae4 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -471,8 +471,7 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
struct dram_info *dram_info = &i915->dram_info;
- val = REG_FIELD_GET(MTL_DDR_TYPE_MASK, val);
- switch (val) {
+ switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 011be7ff51e1..bc8fb4e38d0a 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -112,11 +112,6 @@ int lima_devfreq_init(struct lima_device *ldev)
unsigned long cur_freq;
int ret;
const char *regulator_names[] = { "mali", NULL };
- const char *clk_names[] = { "core", NULL };
- struct dev_pm_opp_config config = {
- .regulator_names = regulator_names,
- .clk_names = clk_names,
- };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -124,7 +119,15 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_config(dev, &config);
+ /*
+ * clkname is set separately so it is not affected by the optional
+ * regulator setting which may return error.
+ */
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
+
+ ret = devm_pm_opp_set_regulators(dev, regulator_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2944228a8e2c..8a3b685c2fcc 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2500,6 +2500,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = {
static const struct panel_desc logictechno_lt161010_2nh = {
.timings = &logictechno_lt161010_2nh_timing,
.num_timings = 1,
+ .bpc = 6,
.size = {
.width = 154,
.height = 86,
@@ -2529,6 +2530,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = {
static const struct panel_desc logictechno_lt170410_2whc = {
.timings = &logictechno_lt170410_2whc_timing,
.num_timings = 1,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6748ec1e0005..a1f909dac89a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
* likely to be allocated beyond the 32-bit boundary if sufficient
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4419e810103d..0a6347c05df4 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -197,8 +197,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
- if (IS_ERR(priv_state))
- return ERR_CAST(priv_state);
+ if (!priv_state)
+ return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
@@ -210,8 +210,8 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
- if (IS_ERR(priv_state))
- return ERR_CAST(priv_state);
+ if (!priv_state)
+ return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 0cd3f97e7e49..f60ea24db0ec 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host)
static bool host1x_wants_iommu(struct host1x *host1x)
{
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If we support addressing a maximum of 32 bits of physical memory
* and if the host1x firewall is enabled, there's no need to enable
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 5b120402d405..cc23b90cae02 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -533,13 +533,17 @@ static void vmbus_add_channel_work(struct work_struct *work)
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
+ *
+ * If vmbus_device_register() fails, the 'device_obj' is freed in
+ * vmbus_device_release() as called by device_unregister() in the
+ * error path of vmbus_device_register(). In the outside error
+ * path, there's no need to free it.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
- kfree(newchannel->device_obj);
goto err_deq_chan;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8b2e413bf19c..e592c481f7ae 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2082,6 +2082,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = device_register(&child_device_obj->device);
if (ret) {
pr_err("Unable to register child device\n");
+ put_device(&child_device_obj->device);
return ret;
}
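
The vmbus fix follows the device_register() contract: once registration has been attempted, the caller must drop its reference with put_device() rather than kfree() the object, so that the release() callback frees it exactly once. A sketch of that pattern with a hypothetical child structure:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_child {			/* illustrative container */
		struct device dev;
	};

	static void my_child_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_child, dev));
	}

	static int my_add_child(struct my_child *child)
	{
		int ret;

		child->dev.release = my_child_release;
		ret = device_register(&child->dev);
		if (ret) {
			/* do NOT kfree(child): the put triggers my_child_release() */
			put_device(&child->dev);
			return ret;
		}
		return 0;
	}
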
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 81e688975c6a..a901e4e33d81 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -938,6 +938,8 @@ static int asus_ec_probe(struct platform_device *pdev)
ec_data->nr_sensors = hweight_long(ec_data->board_info->sensors);
ec_data->sensors = devm_kcalloc(dev, ec_data->nr_sensors,
sizeof(struct ec_sensor), GFP_KERNEL);
+ if (!ec_data->sensors)
+ return -ENOMEM;
status = setup_lock_data(dev);
if (status) {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 8bf32c6c85d9..9bee4d33fbdf 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -242,10 +242,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
- if (host_bridge->device == tjmax_pci_table[i].device)
+ if (host_bridge->device == tjmax_pci_table[i].device) {
+ pci_dev_put(host_bridge);
return tjmax_pci_table[i].tjmax;
+ }
}
}
+ pci_dev_put(host_bridge);
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
@@ -533,6 +536,10 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
+ /* if we errored on add then this is already gone */
+ if (!tdata)
+ return;
+
/* Remove the sysfs attributes */
sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
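
The coretemp change is a reference-count fix: every pci_get_*() lookup pins the struct pci_dev and must be balanced by pci_dev_put() on every return path, including the early return inside the loop. A minimal sketch (table layout and function name are illustrative):

	#include <linux/pci.h>

	struct tjmax_entry {			/* illustrative */
		u16 device;
		int tjmax;
	};

	static int lookup_tjmax(const struct tjmax_entry *table, int n)
	{
		struct pci_dev *host_bridge;
		int i, ret = -ENOENT;

		host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
		if (!host_bridge)
			return -ENODEV;

		for (i = 0; i < n; i++) {
			if (host_bridge->device == table[i].device) {
				ret = table[i].tjmax;
				break;
			}
		}

		pci_dev_put(host_bridge);	/* drop the reference on every path */
		return ret;
	}
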
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 05f68e9c9477..23b9f94fe0a9 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -117,7 +117,7 @@ static int i5500_temp_probe(struct pci_dev *pdev,
u32 tstimer;
s8 tsfsc;
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable device\n");
return err;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index f6ec165c0fa8..1837cccd993c 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -502,6 +502,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev)
return;
out_register:
+ list_del(&data->list);
hwmon_device_unregister(data->hwmon_dev);
out_user:
ipmi_destroy_user(data->user);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 2a57f4b60c29..e06186986444 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -228,7 +228,7 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
* Shunt Voltage Sum register has 14-bit value with 1-bit shift
* Other Shunt Voltage registers have 12 bits with 3-bit shift
*/
- if (reg == INA3221_SHUNT_SUM)
+ if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM)
*val = sign_extend32(regval >> 1, 14);
else
*val = sign_extend32(regval >> 3, 12);
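
The ina3221 fix hinges on the register layout feeding sign_extend32(): the sum registers carry a 14-bit two's-complement value shifted left by one bit, so after shifting down, bit 14 is the sign bit. A small userspace mirror of the kernel helper shows the arithmetic (the register value is made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as the kernel's sign_extend32(): 'index' is the sign bit. */
	static int32_t sign_extend32(uint32_t value, int index)
	{
		uint8_t shift = 31 - index;
		return (int32_t)(value << shift) >> shift;	/* arithmetic shift */
	}

	int main(void)
	{
		uint16_t regval = 0xC000;	/* raw 16-bit register, LSB unused */
		int32_t sum = sign_extend32(regval >> 1, 14);

		printf("sum = %d\n", sum);	/* prints -8192: bit 14 was the sign */
		return 0;
	}
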
@@ -465,7 +465,7 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
* SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
* SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
*/
- if (reg == INA3221_SHUNT_SUM)
+ if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM)
regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
else
regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index 7404e974762f..2dbbbac9de09 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -396,7 +396,7 @@ static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
return ret;
/* in millidegrees Celsius, temp is given by: */
- *val = (__val * 204) + 550;
+ *val = (__val * 204) + 5500;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index fe0cd205502d..f58943cb1341 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -852,7 +852,8 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
if (ret) {
ret = -EAGAIN;
- i2c_recover_bus(adap);
+ if (id->adap.bus_recovery_info)
+ i2c_recover_bus(adap);
goto out;
}
@@ -1263,8 +1264,13 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(id->rinfo.pinctrl)) {
+ int err = PTR_ERR(id->rinfo.pinctrl);
+
dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
- return PTR_ERR(id->rinfo.pinctrl);
+ if (err != -ENODEV)
+ return err;
+ } else {
+ id->adap.bus_recovery_info = &id->rinfo;
}
id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
@@ -1283,7 +1289,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->adap.retries = 3; /* Default retry value. */
id->adap.algo_data = id;
id->adap.dev.parent = &pdev->dev;
- id->adap.bus_recovery_info = &id->rinfo;
init_completion(&id->xfer_done);
snprintf(id->adap.name, sizeof(id->adap.name),
"Cadence I2C at %08lx", (unsigned long)r_mem->start);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 3082183bd66a..fc70920c4dda 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1132,7 +1132,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
- int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
+ int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE &&
+ msgs->len >= DMA_THRESHOLD && !block_data;
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write slave address: addr=0x%x\n",
@@ -1298,7 +1299,8 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic);
} else {
if (!atomic &&
- i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
+ i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD &&
+ msgs[i].flags & I2C_M_DMA_SAFE)
result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
else
result = i2c_imx_write(i2c_imx, &msgs[i], atomic);
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 0c365b57d957..83457359ec45 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2393,8 +2393,17 @@ static struct platform_driver npcm_i2c_bus_driver = {
static int __init npcm_i2c_init(void)
{
+ int ret;
+
npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
- return platform_driver_register(&npcm_i2c_bus_driver);
+
+ ret = platform_driver_register(&npcm_i2c_bus_driver);
+ if (ret) {
+ debugfs_remove_recursive(npcm_i2c_debugfs_dir);
+ return ret;
+ }
+
+ return 0;
}
module_init(npcm_i2c_init);
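
The npcm_i2c change illustrates the usual rule for module_init(): anything created before a failing registration has to be torn down before returning the error, otherwise a failed init leaks the debugfs directory. A small sketch with placeholder names:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static struct dentry *example_debugfs_dir;
	static struct platform_driver example_driver = {
		.driver = { .name = "example" },	/* probe/remove omitted for brevity */
	};

	static int __init example_init(void)
	{
		int ret;

		example_debugfs_dir = debugfs_create_dir("example", NULL);

		ret = platform_driver_register(&example_driver);
		if (ret) {
			debugfs_remove_recursive(example_debugfs_dir);	/* undo earlier setup */
			return ret;
		}
		return 0;
	}
	module_init(example_init);
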
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 84a77512614d..8fce98bb77ff 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -626,7 +626,6 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n",
gi2c->cur->flags, gi2c->cur->addr);
gi2c->err = -ETIMEDOUT;
- goto err;
}
if (gi2c->err) {
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index b4edf10e8fd0..7539b0740351 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -467,6 +467,7 @@ static int i2c_device_probe(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
+ bool do_power_on;
int status;
if (!client)
@@ -545,8 +546,8 @@ static int i2c_device_probe(struct device *dev)
if (status < 0)
goto err_clear_wakeup_irq;
- status = dev_pm_domain_attach(&client->dev,
- !i2c_acpi_waive_d0_probe(dev));
+ do_power_on = !i2c_acpi_waive_d0_probe(dev);
+ status = dev_pm_domain_attach(&client->dev, do_power_on);
if (status)
goto err_clear_wakeup_irq;
@@ -585,7 +586,7 @@ static int i2c_device_probe(struct device *dev)
err_release_driver_resources:
devres_release_group(&client->dev, client->devres_group_id);
err_detach_pm_domain:
- dev_pm_domain_detach(&client->dev, !i2c_acpi_waive_d0_probe(dev));
+ dev_pm_domain_detach(&client->dev, do_power_on);
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
@@ -610,7 +611,7 @@ static void i2c_device_remove(struct device *dev)
devres_release_group(&client->dev, client->devres_group_id);
- dev_pm_domain_detach(&client->dev, !i2c_acpi_waive_d0_probe(dev));
+ dev_pm_domain_detach(&client->dev, true);
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index ad8fce3e08cd..6e4d10a7cd32 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -805,8 +805,10 @@ static int bma400_get_steps_reg(struct bma400_data *data, int *val)
ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG,
steps_raw, BMA400_STEP_RAW_LEN);
- if (ret)
+ if (ret) {
+ kfree(steps_raw);
return ret;
+ }
*val = get_unaligned_le24(steps_raw);
kfree(steps_raw);
return IIO_VAL_INT;
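
The bma400 hunk is the familiar shape of this class of fix: a heap buffer used for a bulk read has to be freed on the error path as well, not only after a successful read. A sketch of that shape (register and length are placeholders):

	#include <linux/regmap.h>
	#include <linux/slab.h>
	#include <asm/unaligned.h>

	static int read_le24_counter(struct regmap *map, unsigned int reg, int *val)
	{
		u8 *raw = kmalloc(3, GFP_KERNEL);
		int ret;

		if (!raw)
			return -ENOMEM;

		ret = regmap_bulk_read(map, reg, raw, 3);
		if (ret) {
			kfree(raw);		/* don't leak the bounce buffer on error */
			return ret;
		}

		*val = get_unaligned_le24(raw);
		kfree(raw);
		return 0;
	}
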
@@ -869,18 +871,6 @@ static int bma400_init(struct bma400_data *data)
unsigned int val;
int ret;
- /* Try to read chip_id register. It must return 0x90. */
- ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
- if (ret) {
- dev_err(data->dev, "Failed to read chip id register\n");
- return ret;
- }
-
- if (val != BMA400_ID_REG_VAL) {
- dev_err(data->dev, "Chip ID mismatch\n");
- return -ENODEV;
- }
-
data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
ret = devm_regulator_bulk_get(data->dev,
@@ -906,6 +896,18 @@ static int bma400_init(struct bma400_data *data)
if (ret)
return ret;
+ /* Try to read chip_id register. It must return 0x90. */
+ ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(data->dev, "Failed to read chip id register\n");
+ return ret;
+ }
+
+ if (val != BMA400_ID_REG_VAL) {
+ dev_err(data->dev, "Chip ID mismatch\n");
+ return -ENODEV;
+ }
+
ret = bma400_get_power_mode(data);
if (ret) {
dev_err(data->dev, "Failed to get the initial power-mode\n");
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 9341e0e0eb55..998e8bcc06e1 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -202,6 +202,8 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev)
((scu_otp) &
(data->model_data->trim_locate->field)) >>
__ffs(data->model_data->trim_locate->field);
+ if (!trimming_val)
+ trimming_val = 0x8;
}
dev_dbg(data->dev,
"trimming val = %d, offset = %08x, fields = %08x\n",
@@ -563,12 +565,9 @@ static int aspeed_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_property(data->dev->of_node, "aspeed,trim-data-valid",
- NULL)) {
- ret = aspeed_adc_set_trim_data(indio_dev);
- if (ret)
- return ret;
- }
+ ret = aspeed_adc_set_trim_data(indio_dev);
+ if (ret)
+ return ret;
if (of_find_property(data->dev->of_node, "aspeed,battery-sensing",
NULL)) {
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 33e251552214..870f4cb60923 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -2307,11 +2307,9 @@ static int at91_adc_temp_sensor_init(struct at91_adc_state *st,
clb->p6 = buf[AT91_ADC_TS_CLB_IDX_P6];
/*
- * We prepare here the conversion to milli and also add constant
- * factor (5 degrees Celsius) to p1 here to avoid doing it on
- * hotpath.
+ * We prepare here the conversion to milli to avoid doing it on hotpath.
*/
- clb->p1 = clb->p1 * 1000 + 5000;
+ clb->p1 = clb->p1 * 1000;
free_buf:
kfree(buf);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 532daaa6f943..366e252ebeb0 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -634,8 +634,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
trig->ops = &at91_adc_trigger_ops;
ret = iio_trigger_register(trig);
- if (ret)
+ if (ret) {
+ iio_trigger_free(trig);
return NULL;
+ }
return trig;
}
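
The at91_adc hunk encodes an ownership rule worth keeping in mind: a trigger that fails iio_trigger_register() is still owned by the caller and must be released with iio_trigger_free(), otherwise the allocation leaks. A minimal sketch (helper name is illustrative):

	#include <linux/iio/trigger.h>

	static struct iio_trigger *register_or_free(struct iio_trigger *trig)
	{
		int ret = iio_trigger_register(trig);

		if (ret) {
			iio_trigger_free(trig);	/* registration failed: caller still owns it */
			return NULL;
		}
		return trig;
	}
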
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 30a31f185d08..88e947f300cf 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -57,7 +57,8 @@ static struct iio_map mp2629_adc_maps[] = {
MP2629_MAP(SYSTEM_VOLT, "system-volt"),
MP2629_MAP(INPUT_VOLT, "input-volt"),
MP2629_MAP(BATT_CURRENT, "batt-current"),
- MP2629_MAP(INPUT_CURRENT, "input-current")
+ MP2629_MAP(INPUT_CURRENT, "input-current"),
+ { }
};
static int mp2629_read_raw(struct iio_dev *indio_dev,
@@ -74,7 +75,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- if (chan->address == MP2629_INPUT_VOLT)
+ if (chan->channel == MP2629_INPUT_VOLT)
rval &= GENMASK(6, 0);
*val = rval;
return IIO_VAL_INT;
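
The added `{ }` entry in the mp2629 map table matters because consumers of a struct iio_map array (e.g. iio_map_array_register()) walk it until they hit an all-zero element; without the sentinel the walk runs past the end of the array. A sketch of a properly terminated table (labels and device name are illustrative):

	#include <linux/iio/driver.h>
	#include <linux/iio/machine.h>

	static struct iio_map example_maps[] = {
		{
			.consumer_dev_name = "example-charger",
			.consumer_channel  = "batt-volt",
			.adc_channel_label = "batt-volt",
		},
		{
			.consumer_dev_name = "example-charger",
			.consumer_channel  = "input-volt",
			.adc_channel_label = "input-volt",
		},
		{ }	/* sentinel: iio_map_array_register() stops here */
	};
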
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 3bb4028c5d74..df3bc5c3d378 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -245,14 +245,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- unsigned int reg = afe4403_channel_values[chan->address];
- unsigned int field = afe4403_channel_leds[chan->address];
+ unsigned int reg, field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ reg = afe4403_channel_values[chan->address];
ret = afe4403_read(afe, reg, val);
if (ret)
return ret;
@@ -262,6 +262,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ field = afe4403_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[field], val);
if (ret)
return ret;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 8fca787b2524..836da31b7e30 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -250,20 +250,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- unsigned int value_reg = afe4404_channel_values[chan->address];
- unsigned int led_field = afe4404_channel_leds[chan->address];
- unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+ unsigned int value_reg, led_field, offdac_field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ value_reg = afe4404_channel_values[chan->address];
ret = regmap_read(afe->regmap, value_reg, val);
if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
+ offdac_field = afe4404_channel_offdacs[chan->address];
ret = regmap_field_read(afe->fields[offdac_field], val);
if (ret)
return ret;
@@ -273,6 +273,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ led_field = afe4404_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[led_field], val);
if (ret)
return ret;
@@ -295,19 +296,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- unsigned int led_field = afe4404_channel_leds[chan->address];
- unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+ unsigned int led_field, offdac_field;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
+ offdac_field = afe4404_channel_offdacs[chan->address];
return regmap_field_write(afe->fields[offdac_field], val);
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ led_field = afe4404_channel_leds[chan->address];
return regmap_field_write(afe->fields[led_field], val);
}
break;
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 307557a609e3..52744dd98e65 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -632,7 +632,7 @@ static int bno055_set_regmask(struct bno055_priv *priv, int val, int val2,
return -EINVAL;
}
delta = abs(tbl_val - req_val);
- if (delta < best_delta || first) {
+ if (first || delta < best_delta) {
best_delta = delta;
hwval = i;
first = false;
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 994f03a71520..d86a3305d9e8 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -58,8 +58,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
t->group = configfs_register_default_group(iio_triggers_group, t->name,
&iio_trigger_type_group_type);
- if (IS_ERR(t->group))
+ if (IS_ERR(t->group)) {
+ mutex_lock(&iio_trigger_types_lock);
+ list_del(&t->list);
+ mutex_unlock(&iio_trigger_types_lock);
ret = PTR_ERR(t->group);
+ }
return ret;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 7cf6e8490123..0d4447df7200 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -293,6 +293,8 @@ config RPR0521
tristate "ROHM RPR0521 ALS and proximity sensor driver"
depends on I2C
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build support for ROHM's RPR0521
ambient light and proximity sensor device.
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b62c139baf41..38d4c7644bef 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -54,9 +54,6 @@
#define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
#define APDS9960_REG_CONFIG_2 0x90
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5
-
#define APDS9960_REG_ID 0x92
#define APDS9960_REG_STATUS 0x93
@@ -77,6 +74,9 @@
#define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
#define APDS9960_REG_GCONF_2 0xa3
+#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60
+#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5
+
#define APDS9960_REG_GOFFSET_U 0xa4
#define APDS9960_REG_GOFFSET_D 0xa5
#define APDS9960_REG_GPULSE 0xa6
@@ -396,9 +396,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val)
}
ret = regmap_update_bits(data->regmap,
- APDS9960_REG_CONFIG_2,
- APDS9960_REG_CONFIG_2_GGAIN_MASK,
- idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT);
+ APDS9960_REG_GCONF_2,
+ APDS9960_REG_GCONF_2_GGAIN_MASK,
+ idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT);
if (!ret)
data->pxs_gain = idx;
mutex_unlock(&data->lock);
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index cbc9349c342a..550b75b7186f 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -25,13 +25,6 @@ enum {
MS5607,
};
-struct ms5611_chip_info {
- u16 prom[MS5611_PROM_WORDS_NB];
-
- int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
- s32 *temp, s32 *pressure);
-};
-
/*
* OverSampling Rate descriptor.
* Warning: cmd MUST be kept aligned on a word boundary (see
@@ -50,12 +43,15 @@ struct ms5611_state {
const struct ms5611_osr *pressure_osr;
const struct ms5611_osr *temp_osr;
+ u16 prom[MS5611_PROM_WORDS_NB];
+
int (*reset)(struct ms5611_state *st);
int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
s32 *temp, s32 *pressure);
- struct ms5611_chip_info *chip_info;
+ int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
+ s32 *pressure);
struct regulator *vdd;
};
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 717521de66c4..c564a1d6cafe 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -85,7 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(st, i, &st->chip_info->prom[i]);
+ ret = st->read_prom_word(st, i, &st->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -93,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
}
}
- if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
+ if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
dev_err(&indio_dev->dev, "PROM integrity check failed\n");
return -ENODEV;
}
@@ -114,21 +114,20 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
return ret;
}
- return st->chip_info->temp_and_pressure_compensate(st->chip_info,
- temp, pressure);
+ return st->compensate_temp_and_pressure(st, temp, pressure);
}
-static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
- sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
+ sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2;
@@ -154,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf
return 0;
}
-static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
- sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6);
+ sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2, tmp;
@@ -342,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
static const unsigned long ms5611_scan_masks[] = {0x3, 0};
-static struct ms5611_chip_info chip_info_tbl[] = {
- [MS5611] = {
- .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
- },
- [MS5607] = {
- .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
- }
-};
-
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
@@ -433,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
- st->chip_info = &chip_info_tbl[type];
+
+ switch (type) {
+ case MS5611:
+ st->compensate_temp_and_pressure =
+ ms5611_temp_and_pressure_compensate;
+ break;
+ case MS5607:
+ st->compensate_temp_and_pressure =
+ ms5607_temp_and_pressure_compensate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
st->temp_osr =
&ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
st->pressure_osr =
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 432e912096f4..a0a7205c9c3a 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -91,7 +91,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
spi->mode = SPI_MODE_0;
- spi->max_speed_hz = 20000000;
+ spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index d6c5e9644738..6b05eed41612 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -203,9 +203,13 @@ static int iio_sysfs_trigger_remove(int id)
static int __init iio_sysfs_trig_init(void)
{
+ int ret;
device_initialize(&iio_sysfs_trig_dev);
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
- return device_add(&iio_sysfs_trig_dev);
+ ret = device_add(&iio_sysfs_trig_dev);
+ if (ret)
+ put_device(&iio_sysfs_trig_dev);
+ return ret;
}
module_init(iio_sysfs_trig_init);
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b86de1312512..84b87526b7ba 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -273,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype,
* Get device info.
*/
- if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3)
input_dev->id.vendor = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
- if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3)
input_dev->id.product = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
- if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3)
iforce->device_memory.end = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
- if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2)
+ if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2)
ff_effects = buf[1];
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 480476121c01..09489380afda 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -18,6 +18,10 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
+static bool use_low_level_irq;
+module_param(use_low_level_irq, bool, 0444);
+MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered");
+
struct soc_button_info {
const char *name;
int acpi_index;
@@ -74,6 +78,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
},
{
+ /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ },
+ {
/*
* Acer One S1003. _LID method messes with power-button GPIO
* IRQ settings, leading to a non working power-button.
@@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev,
}
/* See dmi_use_low_level_irq[] comment */
- if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) {
+ if (!autorepeat && (use_low_level_irq ||
+ dmi_check_system(dmi_use_low_level_irq))) {
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
gpio_keys[n_buttons].irq = irq;
gpio_keys[n_buttons].gpio = -ENOENT;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index fa021af8506e..b0f776448a1c 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -192,6 +192,7 @@ static const char * const smbus_pnp_ids[] = {
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
+ "SYN3286", /* HP Laptop 15-da3001TU */
NULL
};
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 0778dc03cd9e..46f8a694291e 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -115,18 +115,18 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
},
{
- /* ASUS ZenBook UX425UA */
+ /* ASUS ZenBook UX425UA/QA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425"),
},
.driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
},
{
- /* ASUS ZenBook UM325UA */
+ /* ASUS ZenBook UM325UA/QA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325"),
},
.driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
},
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index f9486495baef..6dac7c1853a5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev)
{
int error;
- i8042_platform_device = dev;
-
if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
@@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev)
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return error;
}
@@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev)
i8042_unregister_ports();
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return 0;
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a33cc7950cf5..c281e49826c2 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -1158,6 +1158,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+retry_read_config:
/* Read configuration and apply touchscreen parameters */
goodix_read_config(ts);
@@ -1165,6 +1166,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) {
+ if (!ts->reset_controller_at_probe &&
+ ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) {
+ dev_info(&ts->client->dev, "Config not set, resetting controller\n");
+ /* Retry after a controller reset */
+ ts->reset_controller_at_probe = true;
+ error = goodix_reset(ts);
+ if (error)
+ return error;
+ goto retry_read_config;
+ }
dev_err(&ts->client->dev,
"Invalid config (%d, %d, %d), using defaults\n",
ts->prop.max_x, ts->prop.max_y, ts->max_touch_num);
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 3a4952935366..3d9c5758d8a4 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -211,12 +211,14 @@ static int raydium_i2c_send(struct i2c_client *client,
error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer));
if (likely(!error))
- return 0;
+ goto out;
msleep(RM_RETRY_DELAY_MS);
} while (++tries < RM_MAX_RETRIES);
dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+out:
+ kfree(tx_buf);
return error;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 5a8f780e7ffd..bc94059a5b87 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -820,6 +820,7 @@ int __init dmar_dev_scope_init(void)
info = dmar_alloc_pci_notify_info(dev,
BUS_NOTIFY_ADD_DEVICE);
if (!info) {
+ pci_dev_put(dev);
return dmar_dev_scope_status;
} else {
dmar_pci_bus_add_dev(info);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 48cdcd0a5cf3..5287efe247b1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -959,11 +959,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (domain_use_first_level(domain)) {
- pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
- if (iommu_is_dma_domain(&domain->domain))
- pteval |= DMA_FL_PTE_ACCESS;
- }
+ if (domain_use_first_level(domain))
+ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1398,6 +1396,24 @@ static void domain_update_iotlb(struct dmar_domain *domain)
spin_unlock_irqrestore(&domain->lock, flags);
}
+/*
+ * The extra devTLB flush quirk impacts those QAT devices with PCI device
+ * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
+ * check because it applies only to the built-in QAT devices and it doesn't
+ * grant additional privileges.
+ */
+#define BUGGY_QAT_DEVID_MASK 0x494c
+static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
+{
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return false;
+
+ if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
+ return false;
+
+ return true;
+}
+
static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
@@ -1480,6 +1496,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
+ quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -3856,8 +3873,10 @@ static inline bool has_external_pci(void)
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev)
- if (pdev->external_facing)
+ if (pdev->external_facing) {
+ pci_dev_put(pdev);
return true;
+ }
return false;
}
@@ -4492,9 +4511,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
if (dev_is_pci(dev)) {
if (ecap_dev_iotlb_support(iommu->ecap) &&
pci_ats_supported(pdev) &&
- dmar_ats_supported(pdev, iommu))
+ dmar_ats_supported(pdev, iommu)) {
info->ats_supported = 1;
-
+ info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
+ }
if (sm_supported(iommu)) {
if (pasid_supported(iommu)) {
int features = pci_pasid_features(pdev);
@@ -4933,3 +4953,48 @@ static void __init check_tylersburg_isoch(void)
pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
vtisochctrl);
}
+
+/*
+ * Here we deal with a device TLB defect where device may inadvertently issue ATS
+ * invalidation completion before posted writes initiated with translated address
+ * that utilized translations matching the invalidation address range, violating
+ * the invalidation completion ordering.
+ * Therefore, any use cases that cannot guarantee DMA is stopped before unmap is
+ * vulnerable to this defect. In other words, any dTLB invalidation initiated not
+ * under the control of the trusted/privileged host device driver must use this
+ * quirk.
+ * Device TLBs are invalidated under the following six conditions:
+ * 1. Device driver does DMA API unmap IOVA
+ * 2. Device driver unbind a PASID from a process, sva_unbind_device()
+ * 3. PASID is torn down, after PASID cache is flushed. e.g. process
+ * exit_mmap() due to crash
+ * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
+ * VM has to free pages that were unmapped
+ * 5. Userspace driver unmaps a DMA buffer
+ * 6. Cache invalidation in vSVA usage (upcoming)
+ *
+ * For #1 and #2, device drivers are responsible for stopping DMA traffic
+ * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
+ * invalidate TLB the same way as normal user unmap which will use this quirk.
+ * The dTLB invalidation after PASID cache flush does not need this quirk.
+ *
+ * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
+ */
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+ unsigned long address, unsigned long mask,
+ u32 pasid, u16 qdep)
+{
+ u16 sid;
+
+ if (likely(!info->dtlb_extra_inval))
+ return;
+
+ sid = PCI_DEVID(info->bus, info->devfn);
+ if (pasid == PASID_RID2PASID) {
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, address, mask);
+ } else {
+ qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
+ pasid, qdep, address, mask);
+ }
+}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 92023dff9513..db9df7c3790c 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -623,6 +623,7 @@ struct device_domain_info {
u8 pri_enabled:1;
u8 ats_supported:1;
u8 ats_enabled:1;
+ u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
@@ -728,6 +729,9 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
unsigned int size_order);
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+ unsigned long address, unsigned long pages,
+ u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
u32 pasid);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c30ddac40ee5..e13d7e5273e1 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -642,7 +642,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
*/
- if (pasid != PASID_RID2PASID)
+ if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
pasid_set_sre(pte);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
@@ -685,7 +685,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
* We should set SRE bit as well since the addresses are expected
* to be GPAs.
*/
- pasid_set_sre(pte);
+ if (ecap_srs(iommu->ecap))
+ pasid_set_sre(pte);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7d08eb034f2d..03b25358946c 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -184,10 +184,13 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
return;
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
- if (info->ats_enabled)
+ if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep, address,
order_base_2(pages));
+ quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
+ svm->pasid, sdev->qdep);
+ }
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,
@@ -745,12 +748,16 @@ bad_req:
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
- handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
+ if (!pdev)
+ goto bad_req;
- trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
- iommu->prq_seq_number++);
+ if (intel_svm_prq_report(iommu, &pdev->dev, req))
+ handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
+ else
+ trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ iommu->prq_seq_number++);
+ pci_dev_put(pdev);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 7ea0100f218a..90ee56d07a6e 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev,
err = get_free_devid();
if (err < 0)
- goto error1;
+ return err;
dev->id = err;
device_initialize(&dev->dev);
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index c3b2c99b5cd5..cfbcd9e973c2 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
if (!entry)
return -ENOMEM;
+ INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
entry->dev.class = elements_class;
@@ -107,7 +108,7 @@ err2:
device_unregister(&entry->dev);
return ret;
err1:
- kfree(entry);
+ put_device(&entry->dev);
return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c5ef818ca36..bb786c39545e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1858,6 +1858,8 @@ bad:
dm_io_client_destroy(c->dm_io);
bad_dm_io:
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
bad_client:
return ERR_PTR(r);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 159c6806c19b..2653516bcdef 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3630,6 +3630,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ limits->dma_alignment = limits->logical_block_size - 1;
}
static struct target_type crypt_target = {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index aaf2472df6e5..e97e9f97456d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -263,6 +263,7 @@ struct dm_integrity_c {
struct completion crypto_backoff;
+ bool wrote_to_journal;
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
@@ -2375,6 +2376,8 @@ static void integrity_commit(struct work_struct *w)
if (!commit_sections)
goto release_flush_bios;
+ ic->wrote_to_journal = true;
+
i = commit_start;
for (n = 0; n < commit_sections; n++) {
for (j = 0; j < ic->journal_section_entries; j++) {
@@ -2591,10 +2594,6 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
- /* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
- return;
-
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
write_sections = ic->n_committed_sections;
@@ -3101,10 +3100,17 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
drain_workqueue(ic->commit_wq);
if (ic->mode == 'J') {
- if (ic->meta_dev)
- queue_work(ic->writer_wq, &ic->writer_work);
+ queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
dm_integrity_flush_buffers(ic, true);
+ if (ic->wrote_to_journal) {
+ init_journal(ic, ic->free_section,
+ ic->journal_sections - ic->free_section, ic->commit_seq);
+ if (ic->free_section) {
+ init_journal(ic, 0, ic->free_section,
+ next_commit_seq(ic->commit_seq));
+ }
+ }
}
if (ic->mode == 'B') {
@@ -3132,6 +3138,8 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("resume\n");
+ ic->wrote_to_journal = false;
+
if (ic->provided_data_sectors != old_provided_data_sectors) {
if (ic->provided_data_sectors > old_provided_data_sectors &&
ic->mode == 'B' &&
@@ -3370,6 +3378,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
+ limits->dma_alignment = limits->logical_block_size - 1;
}
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 6b3f867d0b70..3bfc1583c20a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -655,7 +655,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
size_t *needed = needed_param;
*needed += sizeof(struct dm_target_versions);
- *needed += strlen(tt->name);
+ *needed += strlen(tt->name) + 1;
*needed += ALIGN_MASK;
}
@@ -720,7 +720,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char
iter_info.old_vers = NULL;
iter_info.vers = vers;
iter_info.flags = 0;
- iter_info.end = (char *)vers+len;
+ iter_info.end = (char *)vers + needed;
/*
* Now loop through filling out the names & versions.
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 20fd688f72e7..178e13a5b059 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -875,6 +875,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
limits->io_min = limits->physical_block_size;
+ limits->dma_alignment = limits->logical_block_size - 1;
}
#if IS_ENABLED(CONFIG_FS_DAX)
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
index 542dde9d2609..144027035892 100644
--- a/drivers/media/common/videobuf2/frame_vector.c
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -35,11 +35,7 @@
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
struct frame_vector *vec)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int ret_pin_user_pages_fast = 0;
- int ret = 0;
- int err;
+ int ret;
if (nr_frames == 0)
return 0;
@@ -52,57 +48,17 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
ret = pin_user_pages_fast(start, nr_frames,
FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
(struct page **)(vec->ptrs));
- if (ret > 0) {
- vec->got_ref = true;
- vec->is_pfns = false;
- goto out_unlocked;
- }
- ret_pin_user_pages_fast = ret;
-
- mmap_read_lock(mm);
- vec->got_ref = false;
- vec->is_pfns = true;
- ret = 0;
- do {
- unsigned long *nums = frame_vector_pfns(vec);
-
- vma = vma_lookup(mm, start);
- if (!vma)
- break;
-
- while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
- err = follow_pfn(vma, start, &nums[ret]);
- if (err) {
- if (ret)
- goto out;
- // If follow_pfn() returns -EINVAL, then this
- // is not an IO mapping or a raw PFN mapping.
- // In that case, return the original error from
- // pin_user_pages_fast(). Otherwise this
- // function would return -EINVAL when
- // pin_user_pages_fast() returned -ENOMEM,
- // which makes debugging hard.
- if (err == -EINVAL && ret_pin_user_pages_fast)
- ret = ret_pin_user_pages_fast;
- else
- ret = err;
- goto out;
- }
- start += PAGE_SIZE;
- ret++;
- }
- /* Bail out if VMA doesn't completely cover the tail page. */
- if (start < vma->vm_end)
- break;
- } while (ret < nr_frames);
-out:
- mmap_read_unlock(mm);
-out_unlocked:
- if (!ret)
- ret = -EFAULT;
- if (ret > 0)
- vec->nr_frames = ret;
- return ret;
+ vec->got_ref = true;
+ vec->is_pfns = false;
+ vec->nr_frames = ret;
+
+ if (likely(ret > 0))
+ return ret;
+
+ /* This used to (racily) return non-refcounted pfns. Let people know */
+ WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
+ vec->nr_frames = 0;
+ return ret ? ret : -EFAULT;
}
EXPORT_SYMBOL(get_vaddr_frames);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index e71068f7759b..844264e1b88c 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
u32 context_id = vmci_get_context_id();
struct vmci_event_qp ev;
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
@@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
* kernel.
*/
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 95fa8fb1d45f..de1cc9e1ae57 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1134,7 +1134,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
- ocr &= 3 << bit;
+ /*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' with (bit - 1).
+ */
+ ocr &= 3 << (bit - 1);
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
@@ -1478,6 +1484,11 @@ void mmc_init_erase(struct mmc_card *card)
card->pref_erase = 0;
}
+static bool is_trim_arg(unsigned int arg)
+{
+ return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
+}
+
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
unsigned int arg, unsigned int qty)
{
@@ -1760,7 +1771,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
- if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
+ if (mmc_card_mmc(card) && is_trim_arg(arg) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
@@ -1790,7 +1801,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
* identified by the card->eg_boundary flag.
*/
rem = card->erase_size - (from % card->erase_size);
- if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+ if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
err = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
if ((err) || (to <= from))
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 8d9bceeff986..155ce2bdfe62 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3179,7 +3179,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
struct mmc_test_dbgfs_file *df;
if (card->debugfs_root)
- debugfs_create_file(name, mode, card->debugfs_root, card, fops);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index df941438aef5..26bc59b5a7cc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2588,13 +2588,11 @@ static int msdc_of_clock_parse(struct platform_device *pdev,
return PTR_ERR(host->src_clk_cg);
}
- host->sys_clk_cg = devm_clk_get_optional(&pdev->dev, "sys_cg");
+ /* If present, always enable for this clock gate */
+ host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
if (IS_ERR(host->sys_clk_cg))
host->sys_clk_cg = NULL;
- /* If present, always enable for this clock gate */
- clk_prepare_enable(host->sys_clk_cg);
-
host->bulk_clks[0].id = "pclk_cg";
host->bulk_clks[1].id = "axi_cg";
host->bulk_clks[2].id = "ahb_cg";
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 31ea0a2fce35..ffeb5759830f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1512,7 +1512,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc)
* system resume back.
*/
cqhci_writel(cq_host, 0, CQHCI_CTL);
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT)
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
dev_err(mmc_dev(host->mmc),
"failed to exit halt state when enable CQE\n");
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 34ea1acbb3cc..28dc65023fa9 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1749,6 +1749,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
}
}
+ pci_dev_put(smbus_dev);
+
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index ad457cd9cbaa..bca1d095b759 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -32,6 +32,7 @@
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
+#define O2_SD_MISC_CTRL2 0xF0
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_MISC_CTRL 0x1C0
@@ -877,6 +878,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Set Tuning Windows to 5 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x55);
+ //Adjust 1st and 2nd CD debounce time
+ pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
+ scratch_32 &= 0xFFE7FFFF;
+ scratch_32 |= 0x00180000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
+ pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index b92a408f138d..bec3f9e3cd3f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -470,7 +470,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (IS_ERR(sprd_host->pinctrl))
- return 0;
+ goto reset;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_180:
@@ -498,6 +498,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
/* Wait for 300 ~ 500 us for pin state stable */
usleep_range(300, 500);
+
+reset:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
return 0;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fef03de85b99..c7ad32a75b57 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -373,6 +373,7 @@ static void sdhci_init(struct sdhci_host *host, int soft)
if (soft) {
/* force clock reconfiguration */
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
}
}
@@ -2293,11 +2294,46 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
+static bool sdhci_timing_has_preset(unsigned char timing)
+{
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ return true;
+ };
+ return false;
+}
+
+static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
+{
+ return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ sdhci_timing_has_preset(timing);
+}
+
+static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ /*
+ * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
+ * Frequency. Check if preset values need to be enabled, or the Driver
+ * Strength needs updating. Note, clock changes are handled separately.
+ */
+ return !host->preset_enabled &&
+ (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
+}
+
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
+ bool reinit_uhs = host->reinit_uhs;
+ bool turning_on_clk = false;
u8 ctrl;
+ host->reinit_uhs = false;
+
if (ios->power_mode == MMC_POWER_UNDEFINED)
return;
@@ -2323,6 +2359,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_enable_preset_value(host, false);
if (!ios->clock || ios->clock != host->clock) {
+ turning_on_clk = ios->clock && !host->clock;
+
host->ops->set_clock(host, ios->clock);
host->clock = ios->clock;
@@ -2349,6 +2387,17 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_bus_width(host, ios->bus_width);
+ /*
+ * Special case to avoid multiple clock changes during voltage
+ * switching.
+ */
+ if (!reinit_uhs &&
+ turning_on_clk &&
+ host->timing == ios->timing &&
+ host->version >= SDHCI_SPEC_300 &&
+ !sdhci_presetable_values_change(host, ios))
+ return;
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
@@ -2392,6 +2441,7 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ host->drv_type = ios->drv_type;
} else {
/*
* According to SDHC Spec v3.00, if the Preset Value
@@ -2419,19 +2469,14 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_uhs_signaling(host, ios->timing);
host->timing = ios->timing;
- if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
- ((ios->timing == MMC_TIMING_UHS_SDR12) ||
- (ios->timing == MMC_TIMING_UHS_SDR25) ||
- (ios->timing == MMC_TIMING_UHS_SDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_MMC_DDR52))) {
+ if (sdhci_preset_needed(host, ios->timing)) {
u16 preset;
sdhci_enable_preset_value(host, true);
preset = sdhci_get_preset_value(host);
ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
preset);
+ host->drv_type = ios->drv_type;
}
/* Re-enable SD Clock */
@@ -3768,6 +3813,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
@@ -3830,6 +3876,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
mmc->ops->set_ios(mmc, &mmc->ios);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d750c464bd1e..87a3aaa07438 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -524,6 +524,8 @@ struct sdhci_host {
unsigned int clock; /* Current clock (MHz) */
u8 pwr; /* Current voltage */
+ u8 drv_type; /* Current UHS-I driver type */
+ bool reinit_uhs; /* Force UHS-related re-initialization */
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
index 34d9a7a82ad4..c94bf483541e 100644
--- a/drivers/mtd/nand/onenand/Kconfig
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -26,6 +26,7 @@ config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
depends on OF || COMPILE_TEST
+ depends on OMAP_GPMC
help
Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 33f2c98a030e..c3cc66039925 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5834,7 +5834,7 @@ nand_match_ecc_req(struct nand_chip *chip,
int req_step = requirements->step_size;
int req_strength = requirements->strength;
int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
- int best_step, best_strength, best_ecc_bytes;
+ int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
int best_ecc_bytes_total = INT_MAX;
int i, j;
@@ -5915,7 +5915,7 @@ nand_maximize_ecc(struct nand_chip *chip,
int step_size, strength, nsteps, ecc_bytes, corr;
int best_corr = 0;
int best_step = 0;
- int best_strength, best_ecc_bytes;
+ int best_strength = 0, best_ecc_bytes = 0;
int i, j;
for (i = 0; i < caps->nstepinfos; i++) {
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 8f80019a9f01..198a44794d2d 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -3167,16 +3167,18 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
- nand_cleanup(chip);
+ goto err;
if (nandc->props->use_codeword_fixup) {
ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
- if (ret) {
- nand_cleanup(chip);
- return ret;
- }
+ if (ret)
+ goto err;
}
+ return 0;
+
+err:
+ nand_cleanup(chip);
return ret;
}
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 24150c933fcb..dc3253b318da 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
struct com20020_dev *info;
struct net_device *dev;
struct arcnet_local *lp;
+ int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev)
info->dev = dev;
p_dev->priv = info;
- return com20020_config(p_dev);
+ ret = com20020_config(p_dev);
+ if (ret)
+ goto fail_config;
+
+ return 0;
+fail_config:
+ free_arcdev(dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
- return -ENOMEM;
+ return ret;
} /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e84c49bf4d0c..f298b9b3eb77 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3231,16 +3231,23 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct slave *curr_active_slave, *curr_arp_slave;
- struct icmp6hdr *hdr = icmp6_hdr(skb);
struct in6_addr *saddr, *daddr;
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
if (skb->pkt_type == PACKET_OTHERHOST ||
- skb->pkt_type == PACKET_LOOPBACK ||
- hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ skb->pkt_type == PACKET_LOOPBACK)
+ goto out;
+
+ combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+ if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+ combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
goto out;
- saddr = &ipv6_hdr(skb)->saddr;
- daddr = &ipv6_hdr(skb)->daddr;
+ saddr = &combined->ip6.saddr;
+ daddr = &combined->ip6.daddr;
slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
__func__, slave->dev->name, bond_slave_state(slave),
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index 094197780776..ed3d0b8989a0 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -263,8 +263,10 @@ static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
{
lockdep_assert_held(&elm->lock);
- if (!netif_running(elm->dev))
+ if (!netif_running(elm->dev)) {
+ kfree_skb(skb);
return;
+ }
/* Queue for NAPI pickup.
* rx-offload will update stats and LEDs for us.
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 194c86e0f340..8f6dccd5a587 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -264,22 +264,24 @@ static int cc770_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"couldn't register device (err=%d)\n", err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_cc770dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 00d11e95fd98..e5575d2755e4 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1909,7 +1909,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->hclk = devm_clk_get(cdev->dev, "hclk");
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
- if (IS_ERR(cdev->cclk)) {
+ if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
dev_err(cdev->dev, "no clock found\n");
ret = -ENODEV;
}
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 8f184a852a0a..f2219aa2824b 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -120,7 +120,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- return ret;
+ goto err_free_dev;
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
@@ -132,7 +132,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
ret = m_can_class_register(mcan_class);
if (ret)
- goto err;
+ goto err_free_irq;
/* Enable interrupt control at CAN wrapper IP */
writel(0x1, base + CTL_CSR_INT_CTL_OFFSET);
@@ -144,8 +144,10 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
-err:
+err_free_irq:
pci_free_irq_vectors(pci);
+err_free_dev:
+ m_can_class_free_dev(mcan_class->net);
return ret;
}
@@ -161,6 +163,7 @@ static void m_can_pci_remove(struct pci_dev *pci)
writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET);
m_can_class_unregister(mcan_class);
+ m_can_class_free_dev(mcan_class->net);
pci_free_irq_vectors(pci);
}
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index d513fac50718..db3e767d5320 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -202,22 +202,24 @@ static int sja1000_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_sja1000dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 25f863b4f5f0..ddb7c5735c9a 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -2091,8 +2091,11 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
netdev->dev_port = channel_idx;
ret = register_candev(netdev);
- if (ret)
+ if (ret) {
+ es58x_dev->netdev[channel_idx] = NULL;
+ free_candev(netdev);
return ret;
+ }
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0),
es58x_dev->param->dql_min_limit);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 218b098b261d..47619e9cb005 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -47,6 +47,10 @@
#define MCBA_VER_REQ_USB 1
#define MCBA_VER_REQ_CAN 2
+/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */
+#define MCBA_VER_TERMINATION_ON 0
+#define MCBA_VER_TERMINATION_OFF 1
+
#define MCBA_SIDL_EXID_MASK 0x8
#define MCBA_DLC_MASK 0xf
#define MCBA_DLC_RTR_MASK 0x40
@@ -463,7 +467,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
priv->usb_ka_first_pass = false;
}
- if (msg->termination_state)
+ if (msg->termination_state == MCBA_VER_TERMINATION_ON)
priv->can.termination = MCBA_TERMINATION_ENABLED;
else
priv->can.termination = MCBA_TERMINATION_DISABLED;
@@ -785,9 +789,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term)
};
if (term == MCBA_TERMINATION_ENABLED)
- usb_msg.termination = 1;
+ usb_msg.termination = MCBA_VER_TERMINATION_ON;
else
- usb_msg.termination = 0;
+ usb_msg.termination = MCBA_VER_TERMINATION_OFF;
mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 438e46af03e9..80f07bd20593 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -961,7 +961,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
- { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 215dd17ca790..4059fcc8c832 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -256,6 +256,9 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
&tmp, NULL);
if (rc < 0)
@@ -272,6 +275,9 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp = val;
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
&tmp, NULL);
}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 7633b227b2ca..711d5b5a4c49 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -990,6 +990,7 @@ static int tse_shutdown(struct net_device *dev)
int ret;
phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d350eeec8bad..5a454b58498f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4543,13 +4543,19 @@ static struct pci_driver ena_pci_driver = {
static int __init ena_init(void)
{
+ int ret;
+
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
- return pci_register_driver(&ena_pci_driver);
+ ret = pci_register_driver(&ena_pci_driver);
+ if (ret)
+ destroy_workqueue(ena_wq);
+
+ return ret;
}
static void __exit ena_cleanup(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a08f221e30d4..ac4ea93bd8dd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -13,6 +13,7 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_macsec.h"
+#include "aq_main.h"
#include <linux/ptp_clock_kernel.h>
@@ -858,7 +859,7 @@ static int aq_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
ndev_running = true;
- dev_close(ndev);
+ aq_ndev_close(ndev);
}
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
@@ -874,7 +875,7 @@ static int aq_set_ringparam(struct net_device *ndev,
goto err_exit;
if (ndev_running)
- err = dev_open(ndev, NULL);
+ err = aq_ndev_open(ndev);
err_exit:
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 8a0af371e7dc..77609dc0a08d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -58,7 +58,7 @@ struct net_device *aq_ndev_alloc(void)
return ndev;
}
-static int aq_ndev_open(struct net_device *ndev)
+int aq_ndev_open(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
@@ -88,7 +88,7 @@ err_exit:
return err;
}
-static int aq_ndev_close(struct net_device *ndev)
+int aq_ndev_close(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 99870865f66d..a78c1a168d8e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -16,5 +16,7 @@ DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
+int aq_ndev_open(struct net_device *ndev);
+int aq_ndev_close(struct net_device *ndev);
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index cc932b3cf873..4a1efe9b37d0 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1427,7 +1427,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret) {
netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
ret);
- goto err;
+ return ret;
}
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1448,6 +1448,7 @@ static int ag71xx_open(struct net_device *ndev)
err:
ag71xx_rings_cleanup(ag);
+ phylink_disconnect_phy(ag->phylink);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 5fb3af5670ec..3038386a5afd 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 11d15cd03600..77d4cb4ad782 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
- struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
- if (dev)
- return bnx2x_is_pcie_pending(dev);
- return false;
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c78b6e9dea2c..9f8a6ce4b356 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -14037,8 +14037,16 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ int err;
+
bnxt_debug_init();
- return pci_register_driver(&bnxt_pci_driver);
+ err = pci_register_driver(&bnxt_pci_driver);
+ if (err) {
+ bnxt_debug_exit();
+ return err;
+ }
+
+ return 0;
}
static void __exit bnxt_exit(void)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index d312bd594935..98793b2ac2c7 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1794,13 +1794,10 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (OCTEON_CN23XX_PF(oct)) {
- if (!oct->msix_on)
- if (setup_tx_poll_fn(netdev))
- return -1;
- } else {
- if (setup_tx_poll_fn(netdev))
- return -1;
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
}
netif_tx_start_all_queues(netdev);
@@ -1813,7 +1810,7 @@ static int liquidio_open(struct net_device *netdev)
/* tell Octeon to start forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 1);
if (ret)
- return ret;
+ goto err_rx_ctrl;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,6 +1821,27 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
return ret;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 2f6484dc186a..7eb2ddbe9bad 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1436,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
return AE_OK;
}
- if (strncmp(string.pointer, bgx_sel, 4))
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
return AE_OK;
+ }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index a523ddda7609..de7105a84747 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -798,8 +798,10 @@ static int dm9051_loop_rx(struct board_info *db)
}
ret = dm9051_stop_mrcmd(db);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
skb->protocol = eth_type_trans(skb, db->ndev);
if (db->ndev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 48fb391951dd..13d5ff4e0e02 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -542,6 +542,27 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
return (budget != 0);
}
+static bool tsnep_tx_pending(struct tsnep_tx *tx)
+{
+ unsigned long flags;
+ struct tsnep_tx_entry *entry;
+ bool pending = false;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ if (tx->read != tx->write) {
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) ==
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ pending = true;
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return pending;
+}
+
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
int queue_index, struct tsnep_tx *tx)
{
@@ -821,6 +842,19 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
return done;
}
+static bool tsnep_rx_pending(struct tsnep_rx *rx)
+{
+ struct tsnep_rx_entry *entry;
+
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) ==
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ return true;
+
+ return false;
+}
+
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
int queue_index, struct tsnep_rx *rx)
{
@@ -866,6 +900,17 @@ static void tsnep_rx_close(struct tsnep_rx *rx)
tsnep_rx_ring_cleanup(rx);
}
+static bool tsnep_pending(struct tsnep_queue *queue)
+{
+ if (queue->tx && tsnep_tx_pending(queue->tx))
+ return true;
+
+ if (queue->rx && tsnep_rx_pending(queue->rx))
+ return true;
+
+ return false;
+}
+
static int tsnep_poll(struct napi_struct *napi, int budget)
{
struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
@@ -886,9 +931,19 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
if (!complete)
return budget;
- if (likely(napi_complete_done(napi, done)))
+ if (likely(napi_complete_done(napi, done))) {
tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ /* reschedule if work is already pending, prevent rotten packets
+ * which are transmitted or received after polling but before
+ * interrupt enable
+ */
+ if (tsnep_pending(queue)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(napi);
+ }
+ }
+
return min(done, budget - 1);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index f8c06c3f9464..8671591cb750 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN;
+ tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -2461,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(hw, tx_ring->index, 0);
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
return 0;
@@ -2480,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(hw, tx_ring->index, i);
+ tx_ring->prio = i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
/* Reset the number of netdev queues based on the TC count */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 161930a65f61..c6d8cc15c270 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -95,6 +95,7 @@ struct enetc_bdr {
void __iomem *rcir;
};
u16 index;
+ u16 prio;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index a842e1999122..fcebb54224c0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -137,6 +137,7 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_bdr *tx_ring;
int err;
int i;
@@ -145,16 +146,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
- taprio->enable ? i : 0);
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? i : 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
err = enetc_setup_taprio(ndev, taprio);
-
- if (err)
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
- taprio->enable ? 0 : i);
+ if (err) {
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? 0 : i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+ }
return err;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f623c12eaf95..2ca2b61b451f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -74,7 +74,7 @@
#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
-static void fec_enet_itr_coal_init(struct net_device *ndev);
+static void fec_enet_itr_coal_set(struct net_device *ndev);
#define DRIVER_NAME "fec"
@@ -1220,8 +1220,7 @@ fec_restart(struct net_device *ndev)
writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
- fec_enet_itr_coal_init(ndev);
-
+ fec_enet_itr_coal_set(ndev);
}
static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
@@ -2856,19 +2855,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-static void fec_enet_itr_coal_init(struct net_device *ndev)
-{
- struct ethtool_coalesce ec;
-
- ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
-}
-
static int fec_enet_get_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
void *data)
@@ -3623,6 +3609,10 @@ static int fec_enet_init(struct net_device *ndev)
fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
+ fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
+ fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
/* Check mask of the streaming and coherent API */
ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 0179fc288f5f..17137de9338c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -819,7 +819,6 @@ struct hnae3_knic_private_info {
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
- enum pkt_hash_types rss_type;
void __iomem *io_base;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index e23729ac3bb8..ae2736549526 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -191,23 +191,6 @@ u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_COMM_RSS_KEY_SIZE;
}
-void hclge_comm_get_rss_type(struct hnae3_handle *nic,
- struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
-{
- if (rss_tuple_sets->ipv4_tcp_en ||
- rss_tuple_sets->ipv4_udp_en ||
- rss_tuple_sets->ipv4_sctp_en ||
- rss_tuple_sets->ipv6_tcp_en ||
- rss_tuple_sets->ipv6_udp_en ||
- rss_tuple_sets->ipv6_sctp_en)
- nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (rss_tuple_sets->ipv4_fragment_en ||
- rss_tuple_sets->ipv6_fragment_en)
- nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo)
{
@@ -344,9 +327,6 @@ int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
- if (is_pf)
- hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);
-
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret)
dev_err(&hw->cmq.csq.pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index 946d166a452d..92af3d2980d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -95,8 +95,6 @@ struct hclge_comm_rss_tc_mode_cmd {
};
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
-void hclge_comm_get_rss_type(struct hnae3_handle *nic,
- struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4cb2421e71a7..028577943ec5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -105,26 +105,28 @@ static const struct pci_device_id hns3_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
+#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
{ ptype, \
l, \
CHECKSUM_##s, \
HNS3_L3_TYPE_##t, \
- 1 }
+ 1, \
+ h}
#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
- { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
+ { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
+ PKT_HASH_TYPE_NONE }
static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(0),
- HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
- HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
- HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
- HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
- HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
HNS3_RX_PTYPE_UNUSED_ENTRY(9),
HNS3_RX_PTYPE_UNUSED_ENTRY(10),
HNS3_RX_PTYPE_UNUSED_ENTRY(11),
@@ -132,36 +134,36 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(13),
HNS3_RX_PTYPE_UNUSED_ENTRY(14),
HNS3_RX_PTYPE_UNUSED_ENTRY(15),
- HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
HNS3_RX_PTYPE_UNUSED_ENTRY(26),
HNS3_RX_PTYPE_UNUSED_ENTRY(27),
HNS3_RX_PTYPE_UNUSED_ENTRY(28),
- HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(38),
- HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(46),
HNS3_RX_PTYPE_UNUSED_ENTRY(47),
HNS3_RX_PTYPE_UNUSED_ENTRY(48),
@@ -227,35 +229,35 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(108),
HNS3_RX_PTYPE_UNUSED_ENTRY(109),
HNS3_RX_PTYPE_UNUSED_ENTRY(110),
- HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
HNS3_RX_PTYPE_UNUSED_ENTRY(120),
HNS3_RX_PTYPE_UNUSED_ENTRY(121),
HNS3_RX_PTYPE_UNUSED_ENTRY(122),
- HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(132),
- HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(140),
HNS3_RX_PTYPE_UNUSED_ENTRY(141),
HNS3_RX_PTYPE_UNUSED_ENTRY(142),
@@ -3776,8 +3778,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
desc_cb->reuse_flag = 1;
} else if (frag_size <= ring->rx_copybreak) {
ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
- if (ret)
- goto out;
+ if (!ret)
+ return;
}
out:
@@ -4171,15 +4173,35 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb, u32 rss_hash)
+ struct sk_buff *skb, u32 rss_hash,
+ u32 l234info, u32 ol_info)
{
- struct hnae3_handle *handle = ring->tqp->handle;
- enum pkt_hash_types rss_type;
+ enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
- if (rss_hash)
- rss_type = handle->kinfo.rss_type;
- else
- rss_type = PKT_HASH_TYPE_NONE;
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
+ } else {
+ int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+
+ if (l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) {
+ if (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP)
+ rss_type = PKT_HASH_TYPE_L4;
+ else if (l4_type == HNS3_L4_TYPE_IGMP ||
+ l4_type == HNS3_L4_TYPE_ICMP)
+ rss_type = PKT_HASH_TYPE_L3;
+ }
+ }
skb_set_hash(skb, rss_hash, rss_type);
}
@@ -4282,7 +4304,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ring->tqp_vector->rx_group.total_bytes += len;
- hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
+ l234info, ol_info);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 133a054af6b7..294a14b4fdef 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -404,6 +404,7 @@ struct hns3_rx_ptype {
u32 ip_summed : 2;
u32 l3_type : 4;
u32 valid : 1;
+ u32 hash_type: 3;
};
struct ring_stats {
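For readers following the hns3 changes above: the per-packet hash type now comes from the extended RX packet-type table instead of a device-global rss_type. Below is a minimal userspace sketch of that table-driven lookup; only the three indices mirror rows 17, 19 and 41 of the table in the hunk, everything else (names, enum values) is invented for illustration.

#include <stdio.h>

/* Stand-in for the driver's hns3_rx_ptype table; HASH_* mirror the roles of
 * PKT_HASH_TYPE_NONE/L3/L4 and the bitfields loosely follow the hns3_enet.h
 * hunk above. Values here are simplified, not the driver's. */
enum hash_type { HASH_NONE, HASH_L3, HASH_L4 };

struct rx_ptype {
	unsigned int valid : 1;
	unsigned int hash_type : 3;
};

static const struct rx_ptype ptype_tbl[] = {
	[17] = { .valid = 1, .hash_type = HASH_NONE },
	[19] = { .valid = 1, .hash_type = HASH_L4 },
	[41] = { .valid = 1, .hash_type = HASH_L4 },
};

static enum hash_type hash_type_for(unsigned int ptype)
{
	if (ptype >= sizeof(ptype_tbl) / sizeof(ptype_tbl[0]) ||
	    !ptype_tbl[ptype].valid)
		return HASH_NONE;
	return ptype_tbl[ptype].hash_type;
}

int main(void)
{
	printf("ptype 19 -> %d\n", hash_type_for(19));	/* 2 == HASH_L4 */
	printf("ptype 17 -> %d\n", hash_type_for(17));	/* 0 == HASH_NONE */
	printf("ptype 99 -> %d\n", hash_type_for(99));	/* unused entry -> HASH_NONE */
	return 0;
}

When the advanced descriptor layout is not enabled, the fallback branch in the hns3_enet.c hunk derives the same answer from the L3/L4 id fields of the descriptor instead.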
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 987271da6e9b..4e54f91f7a6c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3443,6 +3443,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev)
hdev->hw.mac.autoneg = cmd.base.autoneg;
hdev->hw.mac.speed = cmd.base.speed;
hdev->hw.mac.duplex = cmd.base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
return 0;
}
@@ -4859,7 +4860,6 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return ret;
}
- hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
@@ -11587,9 +11587,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_msi_irq_uninit;
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
- !hnae3_dev_phy_imp_supported(hdev)) {
- ret = hclge_mac_mdio_config(hdev);
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+ ret = hclge_mac_mdio_config(hdev);
+
if (ret)
goto err_msi_irq_uninit;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index e1f54a2f28b2..2d6906aba2a2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1474,8 +1474,15 @@ static struct pci_driver hinic_driver = {
static int __init hinic_module_init(void)
{
+ int ret;
+
hinic_dbg_register_debugfs(HINIC_DRV_NAME);
- return pci_register_driver(&hinic_driver);
+
+ ret = pci_register_driver(&hinic_driver);
+ if (ret)
+ hinic_dbg_unregister_debugfs();
+
+ return ret;
}
static void __exit hinic_module_exit(void)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 560d1d442232..d3fdc290937f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1741,11 +1741,8 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr))
return -ENOMEM;
- }
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
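A tiny model of the ownership rule the e100 hunk restores (function and buffer names are invented, not the driver's): when the mapping fails, the buffer must be left with the caller, because the -ENOMEM return asks the caller to retry with it later.

#include <errno.h>
#include <stdio.h>

/* Illustrative only: a prepare step that fails must not release the caller's
 * buffer, since the negative return tells the caller to keep it and retry;
 * freeing it here would make that retry a use-after-free. */
static int xmit_prepare(const char *buf, int mapping_ok)
{
	if (!mapping_ok)
		return -ENOMEM;		/* caller still owns buf */

	printf("queued: %s\n", buf);	/* consumed only on success */
	return 0;
}

int main(void)
{
	const char *buf = "payload";

	if (xmit_prepare(buf, 0) == -ENOMEM)	/* simulated mapping failure */
		puts("keeping the buffer, will retry");

	return xmit_prepare(buf, 1) ? 1 : 0;	/* retry with the same buffer */
}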
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4a6630586ec9..fc373472e4e1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -32,6 +32,8 @@ struct workqueue_struct *fm10k_workqueue;
**/
static int __init fm10k_init_module(void)
{
+ int ret;
+
pr_info("%s\n", fm10k_driver_string);
pr_info("%s\n", fm10k_copyright);
@@ -43,7 +45,13 @@ static int __init fm10k_init_module(void)
fm10k_dbg_init();
- return fm10k_register_pci_driver();
+ ret = fm10k_register_pci_driver();
+ if (ret) {
+ fm10k_dbg_exit();
+ destroy_workqueue(fm10k_workqueue);
+ }
+
+ return ret;
}
module_init(fm10k_init_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b5dcd15ced36..b3cb587a2032 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16644,6 +16644,8 @@ static struct pci_driver i40e_driver = {
**/
static int __init i40e_init_module(void)
{
+ int err;
+
pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
@@ -16661,7 +16663,14 @@ static int __init i40e_init_module(void)
}
i40e_dbg_init();
- return pci_register_driver(&i40e_driver);
+ err = pci_register_driver(&i40e_driver);
+ if (err) {
+ destroy_workqueue(i40e_wq);
+ i40e_dbg_exit();
+ return err;
+ }
+
+ return 0;
}
module_init(i40e_init_module);
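The hinic, fm10k and i40e hunks above, and the iavf and ixgbevf ones further down, all fix the same shape of leak in module init. A minimal sketch of that shape, with placeholder names standing in for the workqueue/debugfs setup and for pci_register_driver():

#include <stdio.h>

/* Placeholder helpers; the point is only the reverse-order unwind when
 * registration fails. */
static int setup_resources(void)     { puts("resources up");  return 0; }
static void teardown_resources(void) { puts("resources down"); }
static int register_driver(void)     { return -1; /* simulate failure */ }

static int module_init_example(void)
{
	int err;

	err = setup_resources();
	if (err)
		return err;

	err = register_driver();
	if (err) {
		/* Undo earlier setup so a failed init leaks nothing. */
		teardown_resources();
		return err;
	}

	return 0;
}

int main(void)
{
	return module_init_example() ? 1 : 0;
}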
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 3f6187c16424..0d1bab4ac1b0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -298,7 +298,6 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
-#define IAVF_FLAG_INITIAL_MAC_SET BIT(23)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 3fc572341781..f71e132ede09 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1087,12 +1087,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (ret)
return ret;
- /* If this is an initial set MAC during VF spawn do not wait */
- if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
- adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
- return 0;
- }
-
ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
iavf_is_mac_set_handled(netdev, addr->sa_data),
msecs_to_jiffies(2500));
@@ -2605,8 +2599,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
-
adapter->tx_desc_count = IAVF_DEFAULT_TXD;
adapter->rx_desc_count = IAVF_DEFAULT_RXD;
err = iavf_init_interrupt_scheme(adapter);
@@ -2921,7 +2913,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
iavf_free_queues(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
- adapter->netdev->flags &= ~IFF_UP;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -3021,6 +3012,11 @@ static void iavf_reset_task(struct work_struct *work)
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+ }
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3033,6 +3029,7 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -3172,6 +3169,16 @@ reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+
+ if (netif_running(netdev)) {
+ /* Close device to ensure that Tx queues will not be started
+ * during netif_device_attach() at the end of the reset task.
+ */
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+ }
+
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
reset_finish:
rtnl_lock();
@@ -5035,23 +5042,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
- struct iavf_cloud_filter *cf, *cftmp;
- struct iavf_hw *hw = &adapter->hw;
+ struct net_device *netdev;
+ struct iavf_hw *hw;
int err;
- /* When reboot/shutdown is in progress no need to do anything
- * as the adapter is already REMOVE state that was set during
- * iavf_shutdown() callback.
- */
- if (adapter->state == __IAVF_REMOVE)
+ netdev = adapter->netdev;
+ hw = &adapter->hw;
+
+ if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return;
- set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
/* Wait until port initialization is complete.
* There are flows where register/unregister netdev may race.
*/
@@ -5191,6 +5196,8 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
+ int ret;
+
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
@@ -5201,7 +5208,12 @@ static int __init iavf_init_module(void)
pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
- return pci_register_driver(&iavf_driver);
+
+ ret = pci_register_driver(&iavf_driver);
+ if (ret)
+ destroy_workqueue(iavf_wq);
+
+ return ret;
}
module_init(iavf_init_module);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0f6718719453..ca2898467dcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3145,15 +3145,15 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
*/
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
- irqreturn_t ret = IRQ_HANDLED;
struct ice_pf *pf = data;
- bool irq_handled;
- irq_handled = ice_ptp_process_ts(pf);
- if (!irq_handled)
- ret = IRQ_WAKE_THREAD;
+ if (ice_is_reset_in_progress(pf->state))
+ return IRQ_HANDLED;
- return ret;
+ while (!ice_ptp_process_ts(pf))
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 011b727ab190..0f668468d141 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -614,11 +614,14 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
* 2) extend the 40b timestamp value to get a 64bit timestamp
* 3) send that timestamp to the stack
*
- * After looping, if we still have waiting SKBs, return true. This may cause us
- * effectively poll even when not strictly necessary. We do this because it's
- * possible a new timestamp was requested around the same time as the interrupt.
- * In some cases hardware might not interrupt us again when the timestamp is
- * captured.
+ * Returns true if all timestamps were handled, and false if any slots remain
+ * without a timestamp.
+ *
+ * After looping, if we still have waiting SKBs, return false. This may cause
+ * us to effectively poll even when not strictly necessary. We do this because
+ * it's possible a new timestamp was requested around the same time as the
+ * interrupt. In some cases hardware might not interrupt us again when the
+ * timestamp is captured.
*
* Note that we only take the tracking lock when clearing the bit and when
* checking if we need to re-queue this task. The only place where bits can be
@@ -641,7 +644,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
u8 idx;
if (!tx->init)
- return false;
+ return true;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
@@ -2381,10 +2384,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
*/
bool ice_ptp_process_ts(struct ice_pf *pf)
{
- if (pf->ptp.port.tx.init)
- return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
-
- return false;
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}
static void ice_ptp_periodic_work(struct kthread_work *work)
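A stripped-down model of the loop the ice hunks introduce in the threaded interrupt handler; process_ts() below is an invented stand-in that reports true once nothing is pending, matching the new return convention documented above.

#include <stdio.h>
#include <unistd.h>

static int pending = 3;	/* stand-in for outstanding Tx timestamp slots */

/* Handles one slot per call and returns 1 ("true") once all are done. */
static int process_ts(void)
{
	if (pending > 0)
		pending--;
	return pending == 0;
}

int main(void)
{
	/* Keep processing until the helper reports completion, backing off
	 * briefly between attempts (the hunk uses usleep_range(50, 100)). */
	while (!process_ts())
		usleep(75);

	puts("all timestamps handled");
	return 0;
}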
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 99933e89717a..e338fa572793 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4869,6 +4869,8 @@ static struct pci_driver ixgbevf_driver = {
**/
static int __init ixgbevf_init_module(void)
{
+ int err;
+
pr_info("%s\n", ixgbevf_driver_string);
pr_info("%s\n", ixgbevf_copyright);
ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
@@ -4877,7 +4879,13 @@ static int __init ixgbevf_init_module(void)
return -ENOMEM;
}
- return pci_register_driver(&ixgbevf_driver);
+ err = pci_register_driver(&ixgbevf_driver);
+ if (err) {
+ destroy_workqueue(ixgbevf_wq);
+ return err;
+ }
+
+ return 0;
}
module_init(ixgbevf_init_module);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index eb0fb8128096..b399bdb1ca36 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -7350,6 +7350,7 @@ static int mvpp2_get_sram(struct platform_device *pdev,
struct mvpp2 *priv)
{
struct resource *res;
+ void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@@ -7360,9 +7361,12 @@ static int mvpp2_get_sram(struct platform_device *pdev,
return 0;
}
- priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- return PTR_ERR_OR_ZERO(priv->cm3_base);
+ priv->cm3_base = base;
+ return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
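The mvpp2 hunk publishes the mapping only once it is known to be valid. A small sketch of that pattern, where map_region() is an invented stand-in (it returns NULL on failure here, whereas devm_ioremap_resource() returns an ERR_PTR):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct device_priv { void *sram_base; };

static void *map_region(int ok) { return ok ? malloc(16) : NULL; }

static int get_sram(struct device_priv *priv, int ok)
{
	void *base = map_region(ok);

	if (!base)
		return -ENOMEM;		/* priv->sram_base stays untouched */

	priv->sram_base = base;		/* store only a known-good pointer */
	return 0;
}

int main(void)
{
	struct device_priv priv = { 0 };

	printf("fail: %d base %p\n", get_sram(&priv, 0), priv.sram_base);
	printf("ok:   %d base %p\n", get_sram(&priv, 1), priv.sram_base);
	free(priv.sram_base);
	return 0;
}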
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 9089adcb75f9..b45dd7f04e21 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -521,14 +521,12 @@ static int octep_open(struct net_device *netdev)
octep_oq_dbell_init(oct);
ret = octep_get_link_status(oct);
- if (ret)
+ if (ret > 0)
octep_link_up(netdev);
return 0;
set_queues_err:
- octep_napi_disable(oct);
- octep_napi_delete(oct);
octep_clean_irqs(oct);
setup_irq_err:
octep_free_oqs(oct);
@@ -958,7 +956,7 @@ int octep_device_setup(struct octep_device *oct)
ret = octep_ctrl_mbox_init(ctrl_mbox);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize control mbox\n");
- return -1;
+ goto unsupported_dev;
}
oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
ctrl_mbox->h2fq.elem_cnt,
@@ -968,6 +966,10 @@ int octep_device_setup(struct octep_device *oct)
return 0;
unsupported_dev:
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
+ iounmap(oct->mmio[i].hw_addr);
+
+ kfree(oct->conf);
return -1;
}
@@ -1070,7 +1072,11 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->max_mtu = OCTEP_MAX_MTU;
netdev->mtu = OCTEP_DEFAULT_MTU;
- octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ err = octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto register_dev_err;
+ }
eth_hw_addr_set(netdev, octep_dev->mac_addr);
err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 6b4f640163f7..993ac180a5db 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -32,7 +32,6 @@ config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
select NET_DEVLINK
- depends on MACSEC || !MACSEC
depends on (64BIT && COMPILE_TEST) || ARM64
select DIMLIB
depends on PCI
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index 4a343f853b28..c0bedf402da9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -951,7 +951,7 @@ static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction d
else
event.intr_mask = (dir == MCS_RX) ?
MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
- MCS_BBE_RX_PLFIFO_OVERFLOW_INT;
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
/* Notify the lmac_id info which ran into BBE fatal error */
event.lmac_id = i & 0x3ULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a1970ebedf95..f66dde2b0f92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -880,6 +880,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+
+ pci_dev_put(pdev);
}
return 0;
}
@@ -2566,6 +2568,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
}
}
+ pci_dev_put(pdev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 7646bb2ec89b..a62c1b322012 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4985,6 +4985,8 @@ static int nix_setup_ipolicers(struct rvu *rvu,
ipolicer->ref_count = devm_kcalloc(rvu->dev,
ipolicer->band_prof.max,
sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->ref_count)
+ return -ENOMEM;
}
/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index b04fb226f708..ae50d56258ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu)
pfvf->sdp_info = devm_kzalloc(rvu->dev,
sizeof(struct sdp_node_info),
GFP_KERNEL);
- if (!pfvf->sdp_info)
+ if (!pfvf->sdp_info) {
+ pci_dev_put(pdev);
return -ENOMEM;
+ }
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
- put_device(&pdev->dev);
i++;
}
+ pci_dev_put(pdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 282db6fe3b08..67aa02bb2b85 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -884,7 +884,7 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
#ifdef CONFIG_DCB
- if (pfvf->pfc_alloc_status[qidx])
+ if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
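The otx2 one-liner is a plain bounds check before an array index. A self-contained sketch of the same guard, with PRIO_MAX and alloc_status as invented stand-ins for NIX_PF_PFC_PRIO_MAX and pfc_alloc_status:

#include <stdio.h>

#define PRIO_MAX 8

static int alloc_status[PRIO_MAX];

/* Short-circuit evaluation guarantees the array is never indexed out of
 * range, exactly as in the hunk above. */
static int uses_pfc_queue(unsigned int qidx)
{
	return qidx < PRIO_MAX && alloc_status[qidx];
}

int main(void)
{
	alloc_status[3] = 1;
	printf("%d %d %d\n", uses_pfc_queue(3), uses_pfc_queue(7),
	       uses_pfc_queue(42));	/* 1 0 0, no out-of-bounds read */
	return 0;
}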
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 24f9d6024745..47796e4d900c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -746,6 +746,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
return 0;
err_sfp_bind:
+ unregister_netdev(dev);
err_register_netdev:
prestera_port_list_del(port);
err_port_init:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 4046be0e86ff..a9a1028cb17b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -457,7 +457,7 @@ prestera_kern_neigh_cache_find(struct prestera_switch *sw,
n_cache =
rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
__prestera_kern_neigh_cache_ht_params);
- return IS_ERR(n_cache) ? NULL : n_cache;
+ return n_cache;
}
static void
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index aa080dc57ff0..02faaea2aefa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -330,7 +330,7 @@ prestera_nh_neigh_find(struct prestera_switch *sw,
nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
key, __prestera_nh_neigh_ht_params);
- return IS_ERR(nh_neigh) ? NULL : nh_neigh;
+ return nh_neigh;
}
struct prestera_nh_neigh *
@@ -484,7 +484,7 @@ __prestera_nexthop_group_find(struct prestera_switch *sw,
nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
key, __prestera_nexthop_group_ht_params);
- return IS_ERR(nh_grp) ? NULL : nh_grp;
+ return nh_grp;
}
static struct prestera_nexthop_group *
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7cd381530aa4..1d36619c5ec9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2378,8 +2378,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
data + NET_SKB_PAD + eth->ip_align,
ring->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev,
- dma_addr)))
+ dma_addr))) {
+ skb_free_frag(data);
return -ENOMEM;
+ }
}
rxd->rxd1 = (unsigned int)dma_addr;
ring->data[i] = data;
@@ -2996,8 +2998,10 @@ static int mtk_open(struct net_device *dev)
int i;
err = mtk_start_dma(eth);
- if (err)
+ if (err) {
+ phylink_disconnect_phy(mac->phylink);
return err;
+ }
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
@@ -4143,13 +4147,13 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc->offload_version, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
- goto err_free_dev;
+ goto err_deinit_ppe;
}
}
err = mtk_eth_offload_init(eth);
if (err)
- goto err_free_dev;
+ goto err_deinit_ppe;
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -4159,7 +4163,7 @@ static int mtk_probe(struct platform_device *pdev)
err = register_netdev(eth->netdev[i]);
if (err) {
dev_err(eth->dev, "error bringing up device\n");
- goto err_deinit_mdio;
+ goto err_deinit_ppe;
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
@@ -4177,7 +4181,8 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
-err_deinit_mdio:
+err_deinit_ppe:
+ mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
err_free_dev:
mtk_free_dev(eth);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 2d8ca99f2467..784ecb2dc9fb 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -737,7 +737,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return NULL;
+ goto err_free_l2_flows;
ppe->foe_table = foe;
@@ -745,11 +745,26 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
sizeof(*ppe->foe_flow);
ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
if (!ppe->foe_flow)
- return NULL;
+ goto err_free_l2_flows;
mtk_ppe_debugfs_init(ppe, index);
return ppe;
+
+err_free_l2_flows:
+ rhashtable_destroy(&ppe->l2_flows);
+ return NULL;
+}
+
+void mtk_ppe_deinit(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
+ if (!eth->ppe[i])
+ return;
+ rhashtable_destroy(&eth->ppe[i]->l2_flows);
+ }
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 0b7a67a958e4..a09c32539bcc 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -304,6 +304,7 @@ struct mtk_ppe {
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version, int index);
+void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b149e601f673..48cfaa7eaf50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
- mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ if (!err)
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 2e0d59ca62b5..e7a894ba5c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,8 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
+#define CREATE_TRACE_POINTS
+#include "diag/cmd_tracepoint.h"
enum {
CMD_IF_REV = 5,
@@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
- u32 syndrome;
- u8 status;
u16 uid;
- int err;
-
- syndrome = MLX5_GET(mbox_out, out, syndrome);
- status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
- err = cmd_status_to_err(status);
-
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
- else
- mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
- mlx5_command_str(opcode), opcode, op_mod, uid,
- cmd_status_str(status), status, syndrome, err);
}
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
@@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
ent->ret = -ENXIO;
@@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)
return;
}
- cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -1508,8 +1497,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
return -EFAULT;
err = sscanf(outlen_str, "%d", &outlen);
- if (err < 0)
- return err;
+ if (err != 1)
+ return -EINVAL;
ptr = kzalloc(outlen, GFP_KERNEL);
if (!ptr)
@@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
- pci_channel_offline(dev->pdev) || /* FW is inaccessible */
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+ !opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
ent->ts2 = ktime_get_ns();
@@ -1892,6 +1881,16 @@ out_in:
return err;
}
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome,
+ cmd_status_to_err(status));
+}
+
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
u32 syndrome, int err)
{
@@ -1914,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
-static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
u8 status = MLX5_GET(mbox_out, out, status);
@@ -1922,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
err = -EIO;
- if (!err && status != MLX5_CMD_STAT_OK)
+ if (!err && status != MLX5_CMD_STAT_OK) {
err = -EREMOTEIO;
+ mlx5_cmd_err_trace(dev, opcode, op_mod, out);
+ }
cmd_status_log(dev, opcode, status, syndrome, err);
return err;
@@ -1951,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out);
- return err;
+ return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);
@@ -1997,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out);
+ err = cmd_status_err(dev, err, opcode, op_mod, out);
return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);
@@ -2034,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_ctx *ctx;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
@@ -2049,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
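On the outlen_write() hunk above: sscanf() returns the number of successful conversions (or EOF), not a negative errno, so a "< 0" test never catches a malformed value. A minimal demonstration:

#include <stdio.h>

int main(void)
{
	int outlen;
	int n = sscanf("not-a-number", "%d", &outlen);

	if (n != 1) {
		puts("invalid input");	/* n is 0 here, which "< 0" would miss */
		return 1;
	}

	printf("outlen = %d\n", outlen);
	return 0;
}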
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
new file mode 100644
index 000000000000..406ebe17405f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_CMD_TP_H_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+TRACE_EVENT(mlx5_cmd,
+ TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
+ const char *status_str, u8 status, u32 syndrome, int err),
+ TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
+ TP_STRUCT__entry(__string(command_str, command_str)
+ __field(u16, opcode)
+ __field(u16, op_mod)
+ __string(status_str, status_str)
+ __field(u8, status)
+ __field(u32, syndrome)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(command_str, command_str);
+ __entry->opcode = opcode;
+ __entry->op_mod = op_mod;
+ __assign_str(status_str, status_str);
+ __entry->status = status;
+ __entry->syndrome = syndrome;
+ __entry->err = err;
+ ),
+ TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
+ __get_str(command_str), __entry->opcode, __entry->op_mod,
+ __get_str(status_str), __entry->status, __entry->syndrome,
+ __entry->err)
+);
+
+#endif /* _MLX5_CMD_TP_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cmd_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 978a2bb8e122..21831386b26e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
- trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
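Why the fw_tracer ordering change matters, assuming MASK_52_7 covers bits 52..7 as its name suggests: subtracting 1 after masking lets the borrow spill into the cleared low bits, which are later filled from the event's own low 7 bits. A small check:

#include <stdio.h>
#include <stdint.h>

#define MASK_52_7 (((UINT64_C(1) << 53) - 1) & ~UINT64_C(0x7f))

int main(void)
{
	uint64_t ts = 0x80;	/* upper field == 1, low 7 bits clear */

	/* old expression: junk 0x7f appears in bits 6..0 */
	printf("mask-then-subtract: 0x%llx\n",
	       (unsigned long long)((ts & MASK_52_7) - 1));
	/* fixed expression: the field is decremented, low bits stay clear */
	printf("subtract-then-mask: 0x%llx\n",
	       (unsigned long long)((ts - 1) & MASK_52_7));
	return 0;
}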
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 5aff97914367..ff73d25bc6eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- spec = &flow->attr->parse_attr->spec;
-
- /* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+
+ /* update from encap rule to slow path rule */
+ spec = &flow->attr->parse_attr->spec;
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ e->pkt_reformat = NULL;
}
static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
@@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid)
+ struct net_device **encap_dev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -878,9 +879,8 @@ attach_flow:
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- *encap_valid = true;
} else {
- *encap_valid = false;
+ flow_flag_set(flow, SLOW);
}
mutex_unlock(&esw->offloads.encap_tbl_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index d542b8476491..8ad273dde40e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid);
+ struct net_device **encap_dev);
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 285d32d2fd08..d7c020f72401 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(fs, i);
- kfree(accel_tcp);
+ kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
}
@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(fs, i);
- kfree(accel_tcp);
+ kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 2ef36cb9555a..f900709639f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;
- if (is_tx) {
- obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
- key = &ctx->sa.tx_sa->key;
- } else {
- obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
- key = &ctx->sa.rx_sa->key;
+ key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
+
+ if (sa->epn_state.epn_enabled) {
+ obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
+ cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
}
- memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
obj_attrs.replay_window = ctx->secy->replay_window;
obj_attrs.replay_protect = ctx->secy->replay_protect;
@@ -427,15 +427,15 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
return NULL;
}
-static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *rx_sa,
- bool active)
+static int macsec_rx_sa_active_update(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
{
- struct mlx5_core_dev *mdev = macsec->mdev;
- struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
int err = 0;
- if (rx_sa->active != active)
+ if (rx_sa->active == active)
return 0;
rx_sa->active = active;
@@ -444,13 +444,11 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
return 0;
}
- attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
- attrs.enc_key_id = rx_sa->enc_key_id;
- err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
if (err)
- return err;
+ rx_sa->active = false;
- return 0;
+ return err;
}
static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
@@ -476,6 +474,11 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
return false;
}
+ if (!ctx->secy->tx_sc.encrypt) {
+ netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
+ return false;
+ }
+
return true;
}
@@ -620,6 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
if (tx_sa->active == ctx_tx_sa->active)
goto out;
+ tx_sa->active = ctx_tx_sa->active;
if (tx_sa->assoc_num != tx_sc->encoding_sa)
goto out;
@@ -635,8 +639,6 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
}
-
- tx_sa->active = ctx_tx_sa->active;
out:
mutex_unlock(&macsec->lock);
@@ -736,9 +738,14 @@ static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
sc_xarray_element->rx_sc = rx_sc;
err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
- XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
- if (err)
+ XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ netdev_err(ctx->netdev,
+ "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
+ MLX5_MACEC_RX_FS_ID_MAX);
goto destroy_sc_xarray_elemenet;
+ }
rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
if (!rx_sc->md_dst) {
@@ -798,16 +805,16 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
goto out;
}
- rx_sc->active = ctx_rx_sc->active;
if (rx_sc->active == ctx_rx_sc->active)
goto out;
+ rx_sc->active = ctx_rx_sc->active;
for (i = 0; i < MACSEC_NUM_AN; ++i) {
rx_sa = rx_sc->rx_sa[i];
if (!rx_sa)
continue;
- err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
if (err)
goto out;
}
@@ -818,16 +825,43 @@ out:
return err;
}
+static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
+{
+ struct mlx5e_macsec_sa *rx_sa;
+ int i;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ /* At this point the relevant MACsec offload Rx rule has already been
+ * removed by mlx5e_macsec_cleanup_sa. Wait for the datapath to finish
+ * with in-flight Rx data: xa_erase() uses RCU to synchronize, and once
+ * fs_id is erased this rx_sc is hidden from the datapath.
+ */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+ kfree_rcu(rx_sc);
+}
+
static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
{
struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc;
- struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec *macsec;
struct list_head *list;
int err = 0;
- int i;
mutex_lock(&priv->macsec->lock);
@@ -849,31 +883,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
goto out;
}
- for (i = 0; i < MACSEC_NUM_AN; ++i) {
- rx_sa = rx_sc->rx_sa[i];
- if (!rx_sa)
- continue;
-
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
- mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
-
- kfree(rx_sa);
- rx_sc->rx_sa[i] = NULL;
- }
-
-/*
- * At this point the relevant MACsec offload Rx rule already removed at
- * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
- * Rx related data propagating using xa_erase which uses rcu to sync,
- * once fs_id is erased then this rx_sc is hidden from datapath.
- */
- list_del_rcu(&rx_sc->rx_sc_list_element);
- xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
- metadata_dst_free(rx_sc->md_dst);
- kfree(rx_sc->sc_xarray_element);
-
- kfree_rcu(rx_sc);
-
+ macsec_del_rxsc_ctx(macsec, rx_sc);
out:
mutex_unlock(&macsec->lock);
@@ -1015,7 +1025,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
- err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
out:
mutex_unlock(&macsec->lock);
@@ -1155,7 +1165,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
if (err)
goto out;
}
@@ -1234,7 +1244,6 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
- struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec_sa *tx_sa;
struct mlx5e_macsec *macsec;
struct list_head *list;
@@ -1263,28 +1272,15 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
}
list = &macsec_device->macsec_rx_sc_list_head;
- list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
- for (i = 0; i < MACSEC_NUM_AN; ++i) {
- rx_sa = rx_sc->rx_sa[i];
- if (!rx_sa)
- continue;
-
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
- mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
- kfree(rx_sa);
- rx_sc->rx_sa[i] = NULL;
- }
-
- list_del_rcu(&rx_sc->rx_sc_list_element);
-
- kfree_rcu(rx_sc);
- }
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
+ macsec_del_rxsc_ctx(macsec, rx_sc);
kfree(macsec_device->dev_addr);
macsec_device->dev_addr = NULL;
list_del_rcu(&macsec_device->macsec_device_list_element);
--macsec->num_of_devices;
+ kfree(macsec_device);
out:
mutex_unlock(&macsec->lock);
@@ -1536,6 +1532,8 @@ static void macsec_async_event(struct work_struct *work)
async_work = container_of(work, struct mlx5e_macsec_async_work, work);
macsec = async_work->macsec;
+ mutex_lock(&macsec->lock);
+
mdev = async_work->mdev;
obj_id = async_work->obj_id;
macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
@@ -1557,6 +1555,7 @@ static void macsec_async_event(struct work_struct *work)
out_async_work:
kfree(async_work);
+ mutex_unlock(&macsec->lock);
}
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -1745,7 +1744,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
if (!macsec)
return;
- fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+ fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
rcu_read_lock();
sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
index d580b4a91253..347380a2cd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -10,9 +10,11 @@
#include <net/macsec.h>
#include <net/dst_metadata.h>
-/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */
+/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */
+#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
+#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
-#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
+#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
struct mlx5e_priv;
struct mlx5e_macsec;
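As context for the macsec.h change: the Rx metadata word keeps the MACsec marker in bits 31..30 and, after the widening, the fs_id in bits 15..0. A small sketch of the pack/extract round trip; the macro and function names below are illustrative, not the driver's:

#include <stdio.h>
#include <stdint.h>

#define MARKER_SHIFT	30
#define FS_ID_MASK	0xffffu		/* was GENMASK(3, 0) before the patch */

static uint32_t pack_metadata(unsigned int fs_id)
{
	return (1u << MARKER_SHIFT) | (fs_id & FS_ID_MASK);
}

static int is_macsec(uint32_t md)
{
	return ((md >> MARKER_SHIFT) & 0x3) == 0x1;	/* marker check as above */
}

static unsigned int fs_id_of(uint32_t md)
{
	return md & FS_ID_MASK;
}

int main(void)
{
	uint32_t md = pack_metadata(517);	/* an id too big for 4 bits */

	printf("marker ok: %d, fs_id: %u\n", is_macsec(md), fs_id_of(md));
	return 0;
}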
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
index 1ac0cf04e811..5b658a5588c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -250,7 +250,7 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
- int err = 0;
+ int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
if (!ns)
@@ -261,8 +261,10 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
+ if (!flow_group_in) {
+ err = -ENOMEM;
goto out_spec;
+ }
tx_tables = &tx_fs->tables;
ft_crypto = &tx_tables->ft_crypto;
@@ -898,7 +900,7 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
- int err = 0;
+ int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
if (!ns)
@@ -909,8 +911,10 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
+ if (!flow_group_in) {
+ err = -ENOMEM;
goto free_spec;
+ }
rx_tables = &rx_fs->tables;
ft_crypto = &rx_tables->ft_crypto;
@@ -1142,10 +1146,10 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
ft_crypto = &rx_tables->ft_crypto;
/* Set bit[31 - 30] macsec marker - 0x01 */
- /* Set bit[3-0] fs id */
+ /* Set bit[15-0] fs id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, length, 32);
@@ -1205,6 +1209,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
rx_rule->rule[1] = rule;
}
+ kvfree(spec);
return macsec_rule;
err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 24aa25da482b..1728e197558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -35,7 +35,6 @@
#include "en.h"
#include "en/port.h"
#include "en/params.h"
-#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
@@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
mutex_lock(&priv->state_lock);
-
ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
- if (priv->xsk.refcnt) {
- /* The upper half are XSK queues. */
- ch->max_combined *= 2;
- ch->combined_count *= 2;
- }
-
mutex_unlock(&priv->state_lock);
}
@@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
- /* Don't allow changing the number of channels if there is an active
- * XSK, because the numeration of the XSK and regular RQs will change.
- */
- if (priv->xsk.refcnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
- __func__);
- goto out;
- }
-
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e3a4f01bcceb..5e41dfdf79c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u32 sz;
- WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+ sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
- return entries * umr_entry_size / MLX5_OCTWORD;
+ return sz / MLX5_OCTWORD;
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5a6aa61ec82a..bd9936af4582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1634,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack,
- bool *encap_valid,
bool *vf_tun)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -1651,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
*vf_tun = false;
- *encap_valid = true;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
struct net_device *out_dev;
@@ -1668,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,
goto out;
}
err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
- extack, &encap_dev, encap_valid);
+ extack, &encap_dev);
dev_put(out_dev);
if (err)
goto out;
@@ -1732,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun, encap_valid;
u32 max_prio, max_chain;
+ bool vf_tun;
int err = 0;
parse_attr = attr->parse_attr;
@@ -1823,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+ err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
@@ -1853,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid || flow_flag_test(flow, SLOW))
+ if (flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -3759,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
- bool vf_tun, encap_valid = true;
+ bool vf_tun;
int err;
/* This is going in reverse order as needed.
@@ -3781,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (list_is_last(&attr->list, &flow->attrs))
break;
- err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto out_free;
- if (!encap_valid)
- flow_flag_set(flow, SLOW);
-
err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2169486c4bfb..374e3fbdc2cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1362,6 +1362,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
devl_rate_nodes_destroy(devlink);
}
+ /* Destroy legacy fdb when disabling sriov in legacy mode. */
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ mlx5_eswitch_disable_locked(esw);
esw->esw_funcs.num_vfs = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index f68dc2d0dbe6..3029bc1c0dd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -736,6 +736,14 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
+{
+ if (mlx5_esw_allowed(esw))
+ return esw->esw_funcs.num_vfs;
+
+ return 0;
+}
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 728ca9f2bb9d..8c6c9bcb3dc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
- if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
@@ -3387,6 +3387,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
int err;
esw->mode = MLX5_ESWITCH_LEGACY;
+
+ /* If changing from switchdev to legacy mode without sriov enabled,
+ * no need to create legacy fdb.
+ */
+ if (!mlx5_sriov_is_enabled(esw->dev))
+ return 0;
+
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 108a3503f413..edd910258314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -312,6 +312,8 @@ revert_changes:
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ attr->dests[curr_dest].termtbl = NULL;
+
/* search for the destination associated with the
* current term table
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 9d908a0ccfef..1e46f9afa40e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -9,7 +9,8 @@ enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
- MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
};
struct mlx5_fw_reset {
@@ -406,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
err = mlx5_pci_link_toggle(dev);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
- goto done;
+ set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
mlx5_enter_error_state(dev, true);
@@ -482,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
goto out;
}
err = fw_reset->ret;
+ if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+ mlx5_unload_one_devl_locked(dev);
+ mlx5_load_one_devl_locked(dev, false);
+ }
out:
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index a9f4ede4a9bf..32c3e0a649a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
- mlx5_lag_mpesw_cleanup(ldev);
- cancel_work_sync(&ldev->mpesw_work);
destroy_workqueue(ldev->wq);
+ mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
kfree(ldev);
}
@@ -701,10 +700,13 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- dev = ldev->pf[MLX5_LAG_P1].dev;
- if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
- return false;
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
+ return false;
+ }
+ dev = ldev->pf[MLX5_LAG_P1].dev;
mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index ce2ce8ccbd70..f30ac2de639f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -50,6 +50,19 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
+enum mpesw_op {
+ MLX5_MPESW_OP_ENABLE,
+ MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+ struct work_struct work;
+ struct mlx5_lag *lag;
+ enum mpesw_op op;
+ struct completion comp;
+ int result;
+};
+
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@@ -66,7 +79,6 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
- struct work_struct mpesw_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index f643202b29c6..c17e8f1ec914 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -7,63 +7,95 @@
#include "eswitch.h"
#include "lib/mlx5.h"
-void mlx5_mpesw_work(struct work_struct *work)
+static int add_mpesw_rule(struct mlx5_lag *ldev)
{
- struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int err;
- mutex_lock(&ldev->lock);
- mlx5_disable_lag(ldev);
- mutex_unlock(&ldev->lock);
-}
+ if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+ return 0;
-static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
-{
- struct mlx5_lag *ldev = dev->priv.lag;
+ if (ldev->mode != MLX5_LAG_MODE_NONE) {
+ err = -EINVAL;
+ goto out_err;
+ }
- if (!queue_work(ldev->wq, &ldev->mpesw_work))
- mlx5_core_warn(dev, "failed to queue work\n");
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+ return err;
}
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+static void del_mpesw_rule(struct mlx5_lag *ldev)
{
- struct mlx5_lag *ldev = dev->priv.lag;
+ if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+ ldev->mode == MLX5_LAG_MODE_MPESW)
+ mlx5_disable_lag(ldev);
+}
- if (!ldev)
- return;
+static void mlx5_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+ struct mlx5_lag *ldev = mpesww->lag;
mutex_lock(&ldev->lock);
- if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
- ldev->mode == MLX5_LAG_MODE_MPESW)
- mlx5_lag_disable_mpesw(dev);
+ if (mpesww->op == MLX5_MPESW_OP_ENABLE)
+ mpesww->result = add_mpesw_rule(ldev);
+ else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
+ del_mpesw_rule(ldev);
mutex_unlock(&ldev->lock);
+
+ complete(&mpesww->comp);
}
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
+ enum mpesw_op op)
{
struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_mpesw_work_st *work;
int err = 0;
if (!ldev)
return 0;
- mutex_lock(&ldev->lock);
- if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
- goto out;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
- if (ldev->mode != MLX5_LAG_MODE_NONE) {
+ INIT_WORK(&work->work, mlx5_mpesw_work);
+ init_completion(&work->comp);
+ work->op = op;
+ work->lag = ldev;
+
+ if (!queue_work(ldev->wq, &work->work)) {
+ mlx5_core_warn(dev, "failed to queue mpesw work\n");
err = -EINVAL;
goto out;
}
-
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
- if (err)
- mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
-
+ wait_for_completion(&work->comp);
+ err = work->result;
out:
- mutex_unlock(&ldev->lock);
+ kfree(work);
return err;
}
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
+}
+
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
struct mlx5_lag *ldev = mdev->priv.lag;
@@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
- mutex_lock(&ldev->lock);
- if (ldev->mode == MLX5_LAG_MODE_MPESW) {
- mutex_unlock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW)
return -EOPNOTSUPP;
- }
- mutex_unlock(&ldev->lock);
+
return 0;
}
@@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
- INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
- cancel_delayed_work_sync(&ldev->bond_work);
+ WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index be4abcb8fcd5..88e8daffcf92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -12,7 +12,6 @@ struct lag_mpesw {
atomic_t mpesw_rule_count;
};
-void mlx5_mpesw_work(struct work_struct *work);
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 283c4cc28944..e58775a7d955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
- mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
+ __func__, dev->state, dev->pci_status, res, result2str(res));
return res;
}
@@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_pci_trace(dev, "Enter\n");
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
+ __func__, dev->state, dev->pci_status);
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
res = PCI_ERS_RESULT_RECOVERED;
out:
- mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
+ __func__, dev->state, dev->pci_status, err, res, result2str(res));
return res;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 7da012ff0d41..8e2abbab05f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
+ struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
+ struct workqueue_struct *active_wq;
+ struct work_struct work;
+ u8 stop_active_wq:1;
struct mlx5_core_dev *dev;
};
@@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
return 0;
sf_index = event->function_id - base_id;
+ mutex_lock(&table->table_lock);
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_INVALID:
@@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
default:
break;
}
+ mutex_unlock(&table->table_lock);
return 0;
}
@@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
return 0;
}
+static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+{
+ struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_core_dev *dev = table->dev;
+ u16 max_functions;
+ u16 function_id;
+ u16 sw_func_id;
+ int err = 0;
+ u8 state;
+ int i;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ for (i = 0; i < max_functions; i++, function_id++) {
+ if (table->stop_active_wq)
+ return;
+ err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out));
+ if (err)
+ /* A failure of a specific vhca doesn't mean others will
+ * fail as well.
+ */
+ continue;
+ state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+ if (state != MLX5_VHCA_STATE_ACTIVE)
+ continue;
+
+ sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
+ mutex_lock(&table->table_lock);
+ /* Don't probe a device which is already probed */
+ if (!xa_load(&table->devices, i))
+ mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
+ /* There is a race where SF got inactive after the query
+ * above. e.g.: the query returns that the state of the
+ * SF is active, and after that the eswitch manager set it to
+ * inactive.
+ * This case cannot be managed in SW, since the probing of the
+ * SF is on one system, and the inactivation is on a different
+ * system.
+ * If the inactivation is done after the SF performs init_hca(),
+ * the SF will be fully probed and then removed. If it was
+ * done before init_hca(), the SF probe will fail.
+ */
+ mutex_unlock(&table->table_lock);
+ }
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+{
+ if (MLX5_CAP_GEN(table->dev, eswitch_manager))
+ return 0; /* the table is local */
+
+ /* Use a workqueue to probe active SFs, which are in large
+ * quantity and may take up to minutes to probe.
+ */
+ table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
+ if (!table->active_wq)
+ return -ENOMEM;
+ INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+ queue_work(table->active_wq, &table->work);
+ return 0;
+}
+
+static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+{
+ if (table->active_wq) {
+ table->stop_active_wq = true;
+ destroy_workqueue(table->active_wq);
+ }
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->base_address = pci_resource_start(dev->pdev, 2);
table->max_sfs = max_sfs;
xa_init(&table->devices);
+ mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
+
+ err = mlx5_sf_dev_queue_active_work(table);
+ if (err)
+ goto add_active_err;
+
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
@@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
return;
arm_err:
+ mlx5_sf_dev_destroy_active_work(table);
+add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
table->max_sfs = 0;
@@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
+ mlx5_sf_dev_destroy_active_work(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+ mutex_destroy(&table->table_lock);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 31d443dd8386..f68461b13391 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -46,7 +46,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action)
{
- int ret;
+ int ret = -EOPNOTSUPP;
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
@@ -67,6 +67,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
goto out;
}
+ if (ret)
+ goto out;
+
/* Release old action */
if (tbl->miss_action)
refcount_dec(&tbl->miss_action->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 4efccd942fb8..1290b2d3eae6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -3470,6 +3470,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
u16 vid;
vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+ if (!vxlan_fdb_info->offloaded)
+ return;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index fea42542be28..06811c60d598 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -716,6 +716,9 @@ int lan966x_stats_init(struct lan966x *lan966x)
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!lan966x->stats_queue)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
LAN966X_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 6b0febcb7fa9..01f3a3a41cdb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1253,6 +1253,9 @@ int sparx_stats_init(struct sparx5 *sparx5)
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(sparx5->dev));
sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!sparx5->stats_queue)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
SPX5_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 62a325e96345..eeac04b84638 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -659,6 +659,9 @@ static int sparx5_start(struct sparx5 *sparx5)
snprintf(queue_name, sizeof(queue_name), "%s-mact",
dev_name(sparx5->dev));
sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+ if (!sparx5->mact_queue)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
SPX5_MACT_PULL_DELAY);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 19516ccad533..d078156581d5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -104,7 +104,7 @@ static int sparx5_port_open(struct net_device *ndev)
err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
if (err) {
netdev_err(ndev, "Could not attach to PHY\n");
- return err;
+ goto err_connect;
}
phylink_start(port->phylink);
@@ -116,10 +116,20 @@ static int sparx5_port_open(struct net_device *ndev)
err = sparx5_serdes_set(port->sparx5, port, &port->conf);
else
err = phy_power_on(port->serdes);
- if (err)
+ if (err) {
netdev_err(ndev, "%s failed\n", __func__);
+ goto out_power;
+ }
}
+ return 0;
+
+out_power:
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+err_connect:
+ sparx5_port_enable(port, false);
+
return err;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index e05429c751ee..dc2c3756e3a2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -90,13 +90,10 @@ static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
}
}
- sparx5_tc_ets_add(port, params);
- break;
+ return sparx5_tc_ets_add(port, params);
case TC_ETS_DESTROY:
- sparx5_tc_ets_del(port);
-
- break;
+ return sparx5_tc_ets_del(port);
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 405786c00334..cb08d7bf9524 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -341,7 +341,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
return ret;
attrs.split = eth_port.is_split;
- attrs.splittable = !attrs.split;
+ attrs.splittable = eth_port.port_lanes > 1 && !attrs.split;
attrs.lanes = eth_port.port_lanes;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = eth_port.label_port;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 22a5d2419084..991059d6cb32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1432,6 +1432,9 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
/* update port state to get latest interface */
set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
@@ -1477,15 +1480,15 @@ nfp_port_get_module_info(struct net_device *netdev,
if (data < 0x3) {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
}
break;
case NFP_INTERFACE_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
default:
netdev_err(netdev, "Unsupported module 0x%x detected\n",
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 19d043b593cc..62320be4de5a 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -249,25 +249,26 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev)
struct sk_buff *skb;
int i;
- for (i = 0; i < RX_BD_NUM; i++) {
- phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
- phys);
-
- dma_unmap_single(ndev->dev.parent, phys_addr,
- NIXGE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
-
- skb = (struct sk_buff *)(uintptr_t)
- nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
- sw_id_offset);
- dev_kfree_skb(skb);
- }
+ if (priv->rx_bd_v) {
+ for (i = 0; i < RX_BD_NUM; i++) {
+ phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ phys);
+
+ dma_unmap_single(ndev->dev.parent, phys_addr,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = (struct sk_buff *)(uintptr_t)
+ nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ sw_id_offset);
+ dev_kfree_skb(skb);
+ }
- if (priv->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
sizeof(*priv->rx_bd_v) * RX_BD_NUM,
priv->rx_bd_v,
priv->rx_bd_p);
+ }
if (priv->tx_skb)
devm_kfree(ndev->dev.parent, priv->tx_skb);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3f2c30184752..28b7cec485ef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1143,6 +1143,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
tx_ring->next_to_use = ring_num;
+ dev_kfree_skb_any(skb);
return;
}
buffer_info->mapped = true;
@@ -2459,6 +2460,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
pch_gbe_phy_hw_reset(&adapter->hw);
+ pci_dev_put(adapter->ptp_pdev);
free_netdev(netdev);
}
@@ -2533,7 +2535,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
/* setup the private structure */
ret = pch_gbe_sw_init(adapter);
if (ret)
- goto err_free_netdev;
+ goto err_put_dev;
/* Initialize PHY */
ret = pch_gbe_init_phy(adapter);
@@ -2591,6 +2593,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
err_free_adapter:
pch_gbe_phy_hw_reset(&adapter->hw);
+err_put_dev:
+ pci_dev_put(adapter->ptp_pdev);
err_free_netdev:
free_netdev(netdev);
return ret;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 56f93b030551..5456c2b15d9b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -687,8 +687,14 @@ int ionic_port_reset(struct ionic *ionic)
static int __init ionic_init_module(void)
{
+ int ret;
+
ionic_debugfs_create();
- return ionic_bus_register_driver();
+ ret = ionic_bus_register_driver();
+ if (ret)
+ ionic_debugfs_destroy();
+
+ return ret;
}
static void __exit ionic_cleanup_module(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9fb1fa479d4b..16e6bd466143 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -767,34 +767,34 @@ static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
return rc;
}
-#define CONFIG_QEDE_BITMAP_IDX BIT(0)
-#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
-#define CONFIG_QEDR_BITMAP_IDX BIT(2)
-#define CONFIG_QEDF_BITMAP_IDX BIT(4)
-#define CONFIG_QEDI_BITMAP_IDX BIT(5)
-#define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
+#define BITMAP_IDX_FOR_CONFIG_QEDE BIT(0)
+#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV BIT(1)
+#define BITMAP_IDX_FOR_CONFIG_QEDR BIT(2)
+#define BITMAP_IDX_FOR_CONFIG_QEDF BIT(4)
+#define BITMAP_IDX_FOR_CONFIG_QEDI BIT(5)
+#define BITMAP_IDX_FOR_CONFIG_QED_LL2 BIT(6)
static u32 qed_get_config_bitmap(void)
{
u32 config_bitmap = 0x0;
if (IS_ENABLED(CONFIG_QEDE))
- config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE;
if (IS_ENABLED(CONFIG_QED_SRIOV))
- config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV;
if (IS_ENABLED(CONFIG_QED_RDMA))
- config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR;
if (IS_ENABLED(CONFIG_QED_FCOE))
- config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF;
if (IS_ENABLED(CONFIG_QED_ISCSI))
- config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI;
if (IS_ENABLED(CONFIG_QED_LL2))
- config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2;
return config_bitmap;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 76072f8c3d2f..0d57ffcedf0c 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2471,6 +2471,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags);
if (tx_cb->seg_count == -1) {
netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bd0607680329..2fd5c6fdb500 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2991,7 +2991,7 @@ static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
dev_info(&adapter->pdev->dev,
"%s: lock recovery initiated\n", __func__);
- msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
id = ((val >> 2) & 0xF);
if (id == adapter->portnum) {
@@ -3027,7 +3027,7 @@ int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
if (status)
break;
- msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY);
i++;
if (i == 1)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 36324126db6d..6bc923326268 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -3020,6 +3020,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
ret = ravb_open(ndev);
if (ret < 0)
return ret;
+ ravb_set_rx_mode(ndev);
netif_device_attach(ndev);
}
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 88fa29572e23..ddcc325ed570 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -218,6 +218,7 @@ netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
skb->len, skb->data_len, channel->channel);
if (!efx->n_channels || !efx->n_tx_channels || !channel) {
netif_stop_queue(net_dev);
+ dev_kfree_skb_any(skb);
goto err;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25bfecb4a2d..e5cfde1cbd5c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -748,6 +748,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (fc & FLOW_RX) {
pr_debug("\tReceive Flow-Control ON\n");
flow |= GMAC_RX_FLOW_CTRL_RFE;
+ } else {
+ pr_debug("\tReceive Flow-Control OFF\n");
}
writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8273e6a175c8..23ec0a9e396c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1061,8 +1061,16 @@ static void stmmac_mac_link_up(struct phylink_config *config,
ctrl |= priv->hw->link.duplex;
/* Flow Control operation */
- if (tx_pause && rx_pause)
- stmmac_mac_flow_ctrl(priv, duplex);
+ if (rx_pause && tx_pause)
+ priv->flow_ctrl = FLOW_AUTO;
+ else if (rx_pause && !tx_pause)
+ priv->flow_ctrl = FLOW_RX;
+ else if (!rx_pause && tx_pause)
+ priv->flow_ctrl = FLOW_TX;
+ else
+ priv->flow_ctrl = FLOW_OFF;
+
+ stmmac_mac_flow_ctrl(priv, duplex);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
@@ -6548,6 +6556,9 @@ void stmmac_xdp_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
+ /* Ensure tx function is not running */
+ netif_tx_disable(dev);
+
/* Disable NAPI process */
stmmac_disable_all_queues(priv);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c50b137f92d7..d04a239c658e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2082,7 +2082,7 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
- if (port->ndev)
+ if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(port->ndev);
}
}
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index de94921cbef9..025e0c19ec25 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -98,6 +98,7 @@ struct ipvl_port {
struct sk_buff_head backlog;
int count;
struct ida ida;
+ netdevice_tracker dev_tracker;
};
struct ipvl_skb_cb {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 54c94a69c2bb..796a38f9d7b2 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -83,6 +83,7 @@ static int ipvlan_port_create(struct net_device *dev)
if (err)
goto err;
+ netdev_hold(dev, &port->dev_tracker, GFP_KERNEL);
return 0;
err:
@@ -95,6 +96,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
struct sk_buff *skb;
+ netdev_put(dev, &port->dev_tracker);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_l3s_unregister(port);
netdev_rx_handler_unregister(dev);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 14e8d04cb434..2e9742952c4e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -211,7 +211,7 @@ static __net_init int loopback_net_init(struct net *net)
int err;
err = -ENOMEM;
- dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
+ dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
if (!dev)
goto out;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 85376d2f24ca..f41f67b583db 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3835,7 +3835,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
- int ret;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (!ops) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 578897aaada0..b8cc55b2d721 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -141,7 +141,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
u32 idx = macvlan_eth_hash(addr);
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
entry->vlan == vlan)
return entry;
@@ -1647,7 +1647,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
struct hlist_head *h = &vlan->port->vlan_source_hash[i];
struct macvlan_source_entry *entry;
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (entry->vlan != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 0762c735dd8a..1d67a3ca1fd1 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -43,6 +43,7 @@
enum {
MCTP_I2C_FLOW_STATE_NEW = 0,
MCTP_I2C_FLOW_STATE_ACTIVE,
+ MCTP_I2C_FLOW_STATE_INVALID,
};
/* List of all struct mctp_i2c_client
@@ -374,12 +375,18 @@ mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
*/
if (!key->valid) {
state = MCTP_I2C_TX_FLOW_INVALID;
-
- } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) {
- key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
- state = MCTP_I2C_TX_FLOW_NEW;
} else {
- state = MCTP_I2C_TX_FLOW_EXISTING;
+ switch (key->dev_flow_state) {
+ case MCTP_I2C_FLOW_STATE_NEW:
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+ state = MCTP_I2C_TX_FLOW_NEW;
+ break;
+ case MCTP_I2C_FLOW_STATE_ACTIVE:
+ state = MCTP_I2C_TX_FLOW_EXISTING;
+ break;
+ default:
+ state = MCTP_I2C_TX_FLOW_INVALID;
+ }
}
spin_unlock_irqrestore(&key->lock, flags);
@@ -617,21 +624,31 @@ static void mctp_i2c_release_flow(struct mctp_dev *mdev,
{
struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+ bool queue_release = false;
unsigned long flags;
spin_lock_irqsave(&midev->lock, flags);
- midev->release_count++;
- spin_unlock_irqrestore(&midev->lock, flags);
-
- /* Ensure we have a release operation queued, through the fake
- * marker skb
+ /* if we have seen the flow/key previously, we need to pair the
+ * original lock with a release
*/
- spin_lock(&midev->tx_queue.lock);
- if (!midev->unlock_marker.next)
- __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker);
- spin_unlock(&midev->tx_queue.lock);
+ if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE) {
+ midev->release_count++;
+ queue_release = true;
+ }
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+ spin_unlock_irqrestore(&midev->lock, flags);
- wake_up(&midev->tx_wq);
+ if (queue_release) {
+ /* Ensure we have a release operation queued, through the fake
+ * marker skb
+ */
+ spin_lock(&midev->tx_queue.lock);
+ if (!midev->unlock_marker.next)
+ __skb_queue_tail(&midev->tx_queue,
+ &midev->unlock_marker);
+ spin_unlock(&midev->tx_queue.lock);
+ wake_up(&midev->tx_wq);
+ }
}
static const struct net_device_ops mctp_i2c_ops = {
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 689e728345ce..eb344f6d4a7b 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -148,7 +148,7 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
/* Associate the fwnode with the device structure so it
* can be looked up later.
*/
- phy->mdio.dev.fwnode = child;
+ phy->mdio.dev.fwnode = fwnode_handle_get(child);
/* All data is now stored in the phy struct, so register it */
rc = phy_device_register(phy);
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 0b1b6f650104..0b9d37979133 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -343,6 +343,8 @@ static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
kfree_skb(mhi_netdev->skbagg_head);
+ free_netdev(ndev);
+
dev_set_drvdata(&mhi_dev->dev, NULL);
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index a7880c7ce94c..68e56e451b2b 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1683,6 +1683,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
ARRAY_SIZE(nsim_devlink_params));
devl_resources_unregister(devlink);
kfree(nsim_dev->vfconfigs);
+ kfree(nsim_dev->fa_cookie);
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 464d88ca8ab0..a4abea921046 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -484,7 +484,14 @@ static int __init ntb_netdev_init_module(void)
rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
if (rc)
return rc;
- return ntb_transport_register_client(&ntb_netdev_client);
+
+ rc = ntb_transport_register_client(&ntb_netdev_client);
+ if (rc) {
+ ntb_transport_unregister_client_dev(KBUILD_MODNAME);
+ return rc;
+ }
+
+ return 0;
}
module_init(ntb_netdev_init_module);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 349b7b1dbbf2..d49965907561 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -870,8 +870,10 @@ static int at803x_probe(struct phy_device *phydev)
.wolopts = 0,
};
- if (ccr < 0)
+ if (ccr < 0) {
+ ret = ccr;
goto err;
+ }
mode_cfg = ccr & AT803X_MODE_CFG_MASK;
switch (mode_cfg) {
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 417527f8bbf5..7446d5c6c714 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -682,6 +682,13 @@ static int dp83867_of_init(struct phy_device *phydev)
*/
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2;
+ /* For a non-OF device, the RX and TX FIFO depths are taken from
+ * their default values. So, we init the RX & TX FIFO depths here
+ * so that they are configured correctly later in dp83867_config_init();
+ */
+ dp83867->tx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+ dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+
return 0;
}
#endif /* CONFIG_OF_MDIO */
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2810f4f9da0c..0d706ee266af 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -2015,14 +2015,16 @@ static int m88e1510_loopback(struct phy_device *phydev, bool enable)
if (err < 0)
return err;
- /* FIXME: Based on trial and error test, it seem 1G need to have
- * delay between soft reset and loopback enablement.
- */
- if (phydev->speed == SPEED_1000)
- msleep(1000);
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
- return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
- BMCR_LOOPBACK);
+ if (!err) {
+ /* It takes some time for PHY device to switch
+ * into/out-of loopback mode.
+ */
+ msleep(1000);
+ }
+ return err;
} else {
err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
if (err < 0)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 57849ac0384e..8cff61dbc4b5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -217,6 +217,7 @@ static void phy_mdio_device_free(struct mdio_device *mdiodev)
static void phy_device_release(struct device *dev)
{
+ fwnode_handle_put(dev->fwnode);
kfree(to_phy_device(dev));
}
@@ -1520,6 +1521,7 @@ error:
error_module_put:
module_put(d->driver->owner);
+ d->driver = NULL;
error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6547b6cc6cbe..2805b04d6402 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1603,19 +1603,29 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
linkmode_copy(supported, phy->supported);
linkmode_copy(config.advertising, phy->advertising);
- /* Clause 45 PHYs switch their Serdes lane between several different
- * modes, normally 10GBASE-R, SGMII. Some use 2500BASE-X for 2.5G
- * speeds. We really need to know which interface modes the PHY and
- * MAC supports to properly work out which linkmodes can be supported.
+ /* Check whether we would use rate matching for the proposed interface
+ * mode.
*/
- if (phy->is_c45 &&
+ config.rate_matching = phy_get_rate_matching(phy, interface);
+
+ /* Clause 45 PHYs may switch their Serdes lane between, e.g. 10GBASE-R,
+ * 5GBASE-R, 2500BASE-X and SGMII if they are not using rate matching.
+ * For some interface modes (e.g. RXAUI, XAUI and USXGMII) switching
+ * their Serdes is either unnecessary or not reasonable.
+ *
+ * For these which switch interface modes, we really need to know which
+ * interface modes the PHY supports to properly work out which ethtool
+ * linkmodes can be supported. For now, as a work-around, we validate
+ * against all interface modes, which may lead to more ethtool link
+ * modes being advertised than are actually supported.
+ */
+ if (phy->is_c45 && config.rate_matching == RATE_MATCH_NONE &&
interface != PHY_INTERFACE_MODE_RXAUI &&
interface != PHY_INTERFACE_MODE_XAUI &&
interface != PHY_INTERFACE_MODE_USXGMII)
config.interface = PHY_INTERFACE_MODE_NA;
else
config.interface = interface;
- config.rate_matching = phy_get_rate_matching(phy, config.interface);
ret = phylink_validate(pl, supported, &config);
if (ret) {
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 83fcaeb2ac5e..a52ee2bf5575 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1391,12 +1391,21 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
- if (ret) {
- tb_property_free_dir(tbnet_dir);
- return ret;
- }
+ if (ret)
+ goto err_free_dir;
+
+ ret = tb_register_service_driver(&tbnet_driver);
+ if (ret)
+ goto err_unregister;
- return tb_register_service_driver(&tbnet_driver);
+ return 0;
+
+err_unregister:
+ tb_unregister_property_dir("network", tbnet_dir);
+err_free_dir:
+ tb_property_free_dir(tbnet_dir);
+
+ return ret;
}
module_init(tbnet_init);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7a3ab3427369..24001112c323 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -686,7 +686,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (tun)
xdp_rxq_info_unreg(&tfile->xdp_rxq);
ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
- sock_put(&tfile->sk);
}
}
@@ -702,6 +701,9 @@ static void tun_detach(struct tun_file *tfile, bool clean)
if (dev)
netdev_state_change(dev);
rtnl_unlock();
+
+ if (clean)
+ sock_put(&tfile->sk);
}
static void tun_detach_all(struct net_device *dev)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8d5cbda33f66..0897fdb6254b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1915,6 +1915,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_WWAN */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 26c34a7c21bd..554d4e2a84a4 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1357,6 +1357,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
@@ -1422,6 +1423,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bfb58c91db04..32d2c60d334d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -66,6 +66,7 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool is_internal_phy;
struct irq_chip irqchip;
struct irq_domain *irqdomain;
struct fwnode_handle *irqfwnode;
@@ -252,6 +253,43 @@ done:
mutex_unlock(&dev->phy_mutex);
}
+static int smsc95xx_mdiobus_reset(struct mii_bus *bus)
+{
+ struct smsc95xx_priv *pdata;
+ struct usbnet *dev;
+ u32 val;
+ int ret;
+
+ dev = bus->priv;
+ pdata = dev->driver_priv;
+
+ if (pdata->is_internal_phy)
+ return 0;
+
+ mutex_lock(&dev->phy_mutex);
+
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ if (ret < 0)
+ goto reset_out;
+
+ val |= PM_CTL_PHY_RST_;
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ if (ret < 0)
+ goto reset_out;
+
+ /* Driver has no knowledge at this point about the external PHY.
+ * The 802.3 specifies that the reset process shall
+ * be completed within 0.5 s.
+ */
+ fsleep(500000);
+
+reset_out:
+ mutex_unlock(&dev->phy_mutex);
+
+ return 0;
+}
+
static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
struct usbnet *dev = bus->priv;
@@ -1052,7 +1090,6 @@ static void smsc95xx_handle_link_change(struct net_device *net)
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata;
- bool is_internal_phy;
char usb_path[64];
int ret, phy_irq;
u32 val;
@@ -1133,13 +1170,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret < 0)
goto free_mdio;
- is_internal_phy = !(val & HW_CFG_PSEL_);
- if (is_internal_phy)
+ pdata->is_internal_phy = !(val & HW_CFG_PSEL_);
+ if (pdata->is_internal_phy)
pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID);
pdata->mdiobus->priv = dev;
pdata->mdiobus->read = smsc95xx_mdiobus_read;
pdata->mdiobus->write = smsc95xx_mdiobus_write;
+ pdata->mdiobus->reset = smsc95xx_mdiobus_reset;
pdata->mdiobus->name = "smsc95xx-mdiobus";
pdata->mdiobus->parent = &dev->udev->dev;
@@ -1160,7 +1198,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
}
pdata->phydev->irq = phy_irq;
- pdata->phydev->is_internal = is_internal_phy;
+ pdata->phydev->is_internal = pdata->is_internal_phy;
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7106932c6f88..86e52454b5b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3949,12 +3949,11 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
- virtio_reset_device(vdev);
-
unregister_netdev(dev);
free_failover:
net_failover_destroy(vi->failover);
free_vqs:
+ virtio_reset_device(vdev);
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 9bbfff803357..b545d93c6e37 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -959,30 +959,51 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch)
return;
while (index + sizeof(*e) <= len) {
+ u16 attr_size;
+
e = (struct wilc_attr_entry *)&buf[index];
- if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST)
+ attr_size = le16_to_cpu(e->attr_len);
+
+ if (index + sizeof(*e) + attr_size > len)
+ return;
+
+ if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST &&
+ attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e)))
ch_list_idx = index;
- else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL)
+ else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL &&
+ attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e)))
op_ch_idx = index;
+
if (ch_list_idx && op_ch_idx)
break;
- index += le16_to_cpu(e->attr_len) + sizeof(*e);
+
+ index += sizeof(*e) + attr_size;
}
if (ch_list_idx) {
- u16 attr_size;
- struct wilc_ch_list_elem *e;
- int i;
+ u16 elem_size;
ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx];
- attr_size = le16_to_cpu(ch_list->attr_len);
- for (i = 0; i < attr_size;) {
+ /* the number of bytes following the final 'elem' member */
+ elem_size = le16_to_cpu(ch_list->attr_len) -
+ (sizeof(*ch_list) - sizeof(struct wilc_attr_entry));
+ for (unsigned int i = 0; i < elem_size;) {
+ struct wilc_ch_list_elem *e;
+
e = (struct wilc_ch_list_elem *)(ch_list->elem + i);
+
+ i += sizeof(*e);
+ if (i > elem_size)
+ break;
+
+ i += e->no_of_channels;
+ if (i > elem_size)
+ break;
+
if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) {
memset(e->ch_list, sta_ch, e->no_of_channels);
break;
}
- i += e->no_of_channels;
}
}
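Editorial note: the hunk above hardens a TLV walk so that each attribute's advertised length is validated against the remaining buffer before its payload is touched. The following standalone sketch shows the same pattern; the struct layout and names are illustrative, not the wilc1000 definitions.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv_hdr {
	uint8_t  type;
	uint16_t len;            /* payload length; little-endian in the real frame */
} __attribute__((packed));

/* Returns the offset of the first attribute of @want_type, or -1. */
static int tlv_find(const uint8_t *buf, size_t buflen, uint8_t want_type)
{
	size_t index = 0;

	while (index + sizeof(struct tlv_hdr) <= buflen) {
		struct tlv_hdr hdr;
		size_t payload;

		memcpy(&hdr, &buf[index], sizeof(hdr));
		payload = hdr.len;   /* would be le16_to_cpu() in kernel code */

		/* Reject attributes that claim more bytes than remain. */
		if (index + sizeof(hdr) + payload > buflen)
			return -1;

		if (hdr.type == want_type)
			return (int)index;

		index += sizeof(hdr) + payload;
	}
	return -1;
}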
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index eb1d1ba3a443..67df8221b5ae 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -482,14 +482,25 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
if (rsn_ie) {
+ int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
- param->mode_802_11i = 2;
- param->rsn_found = true;
/* extract RSN capabilities */
- offset += (rsn_ie[offset] * 4) + 2;
- offset += (rsn_ie[offset] * 4) + 2;
- memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+ if (offset < rsn_ie_len) {
+ /* skip over pairwise suites */
+ offset += (rsn_ie[offset] * 4) + 2;
+
+ if (offset < rsn_ie_len) {
+ /* skip over authentication suites */
+ offset += (rsn_ie[offset] * 4) + 2;
+
+ if (offset + 1 < rsn_ie_len) {
+ param->mode_802_11i = 2;
+ param->rsn_found = true;
+ memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+ }
+ }
+ }
}
if (param->rsn_found) {
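Editorial note: a standalone sketch of the bounded RSN IE walk introduced above. Each suite-count byte is only read after checking it lies inside the element, and the 2-byte RSN capabilities are only copied when both bytes fit. Offsets follow the IEEE 802.11 RSN element layout (EID, length, version, group cipher, pairwise list, AKM list, RSN capabilities); like the hunk, only the low byte of each suite count is used. Names are illustrative.

#include <stdint.h>
#include <string.h>

/* @ie points at the element ID byte; ie[1] is the element length. */
static int rsn_caps_get(const uint8_t *ie, uint16_t *caps_out)
{
	int ie_len = 2 + ie[1];         /* header + payload, as in the hunk */
	int offset = 8;                 /* first suite-count field */
	uint16_t caps;

	if (offset >= ie_len)
		return -1;
	offset += ie[offset] * 4 + 2;   /* skip pairwise cipher suites */

	if (offset >= ie_len)
		return -1;
	offset += ie[offset] * 4 + 2;   /* skip AKM suites */

	if (offset + 1 >= ie_len)
		return -1;
	memcpy(&caps, &ie[offset], 2);  /* RSN capabilities, little-endian */
	*caps_out = caps;
	return 0;
}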
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index d41e373f9c0a..d6b166fc5c0e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -365,7 +365,8 @@ static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
struct iosm_wwan *wwan, u32 offset,
- u8 service_class, struct sk_buff *skb)
+ u8 service_class, struct sk_buff *skb,
+ u32 pkt_len)
{
struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
@@ -373,7 +374,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
return -ENOMEM;
skb_pull(dest_skb, offset);
- skb_set_tail_pointer(dest_skb, dest_skb->len);
+ skb_trim(dest_skb, pkt_len);
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
@@ -429,7 +430,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
struct sk_buff *skb)
{
- u32 pad_len, packet_offset;
+ u32 pad_len, packet_offset, adgh_len;
struct iosm_wwan *wwan;
struct mux_adgh *adgh;
u8 *block = skb->data;
@@ -470,10 +471,12 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
packet_offset = sizeof(*adgh) + pad_len;
if_id += ipc_mux->wwan_q_offset;
+ adgh_len = le16_to_cpu(adgh->length);
/* Pass the packet to the netif layer */
rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
- adgh->service_class, skb);
+ adgh->service_class, skb,
+ adgh_len - packet_offset);
if (rc) {
dev_err(ipc_mux->dev, "mux adgh decoding error");
return;
@@ -547,7 +550,7 @@ static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
int if_id, int nr_of_dg)
{
u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
- u32 packet_offset, i, rc;
+ u32 packet_offset, i, rc, dg_len;
for (i = 0; i < nr_of_dg; i++, dg++) {
if (le32_to_cpu(dg->datagram_index)
@@ -562,11 +565,12 @@ static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
packet_offset =
le32_to_cpu(dg->datagram_index) +
dl_head_pad_len;
+ dg_len = le16_to_cpu(dg->datagram_length);
/* Pass the packet to the netif layer. */
rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
packet_offset,
- dg->service_class,
- skb);
+ dg->service_class, skb,
+ dg_len - dl_head_pad_len);
if (rc)
goto dg_error;
}
@@ -1207,10 +1211,9 @@ static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
qlth_n_ql_size, ul_list);
ipc_mux_ul_adb_finish(ipc_mux);
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
- IOSM_AGGR_MUX_SIG_ADBH)) {
- dev_kfree_skb(src_skb);
+ IOSM_AGGR_MUX_SIG_ADBH))
return -ENOMEM;
- }
+
ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
ipc_mux->size_needed += offsetof(struct mux_adth, dg);
@@ -1471,8 +1474,7 @@ void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
ipc_mux->ul_data_pend_bytes);
/* Reset the skb settings. */
- skb->tail = 0;
- skb->len = 0;
+ skb_trim(skb, 0);
/* Add the consumed ADB to the free list. */
skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
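Editorial note: a standalone model (plain pointers standing in for the skb) of the length handling these hunks introduce. The receive window is first advanced past the per-datagram header and padding, then clamped to the length advertised for that datagram, so trailing bytes of the aggregated block are never handed to the netif layer. Names are illustrative.

#include <stddef.h>

struct window {
	unsigned char *data;
	size_t len;
};

static void window_pull(struct window *w, size_t offset)
{
	w->data += offset;              /* like skb_pull(): drop header/padding */
	w->len  -= offset;
}

static void window_trim(struct window *w, size_t pkt_len)
{
	if (pkt_len < w->len)           /* like skb_trim(): cut anything past pkt_len */
		w->len = pkt_len;
}

/* pkt_len is the advertised datagram length minus the bytes already pulled. */
static void deliver_sketch(struct window *w, size_t hdr_and_pad, size_t pkt_len)
{
	window_pull(w, hdr_and_pad);
	window_trim(w, pkt_len);
}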
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index d3d34d1c4704..5bf5a93937c9 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -249,7 +249,7 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
if (object->integer.value == 3)
sleep_state = IPC_PCIE_D3L2;
- kfree(object);
+ ACPI_FREE(object);
default_ret:
return sleep_state;
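Editorial note: a hedged kernel-style sketch of the ACPICA ownership pattern the one-line change above follows. Objects returned through a struct acpi_buffer initialised with ACPI_ALLOCATE_BUFFER come from ACPICA's allocator and are conventionally released with ACPI_FREE(), not kfree(). The method name "_XYZ" and the surrounding function are illustrative, not the driver's code.

#include <linux/acpi.h>
#include <linux/errno.h>

static int read_acpi_object_sketch(acpi_handle handle)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, "_XYZ", NULL, &buf); /* "_XYZ" is illustrative */
	if (ACPI_FAILURE(status))
		return -ENODEV;

	obj = buf.pointer;
	/* ... inspect obj->type / obj->integer.value ... */

	ACPI_FREE(obj);                 /* pairs with ACPI_ALLOCATE_BUFFER */
	return 0;
}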
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.h b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
index 9b3a6d86ece7..289397c4ea6c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_protocol.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
@@ -122,7 +122,7 @@ struct iosm_protocol {
struct iosm_imem *imem;
struct ipc_rsp *rsp_ring[IPC_MEM_MSG_ENTRIES];
struct device *dev;
- phys_addr_t phy_ap_shm;
+ dma_addr_t phy_ap_shm;
u32 old_msg_tail;
};
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 3458af31e864..7d0f5e4f0a78 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
return -EFAULT;
}
+ kfree(buffer.pointer);
+
#endif
return 0;
}
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 24436c9e54c9..97600826af69 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -112,8 +112,10 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data;
int ret;
- if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags))
+ if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags)) {
+ kfree_skb(skb);
return -EREMOTEIO;
+ }
ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 580cb6ecffee..66b198663387 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -73,11 +73,15 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
struct nxp_nci_info *info = nci_get_drvdata(ndev);
int r;
- if (!info->phy_ops->write)
+ if (!info->phy_ops->write) {
+ kfree_skb(skb);
return -EOPNOTSUPP;
+ }
- if (info->mode != NXP_NCI_MODE_NCI)
+ if (info->mode != NXP_NCI_MODE_NCI) {
+ kfree_skb(skb);
return -EINVAL;
+ }
r = info->phy_ops->write(info->phy_id, skb);
if (r < 0) {
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index 0270e05b68df..aec356880adf 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -105,6 +105,7 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
mutex_lock(&info->mutex);
if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+ kfree_skb(skb);
mutex_unlock(&info->mutex);
return -EINVAL;
}
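Editorial note: the three NFC fixes above all apply the same ownership rule: once a buffer is handed to a driver's send callback, the callee owns it, so every early-error return must free it or it leaks. A standalone sketch of that rule, with plain malloc/free modelling the skb and illustrative names:

#include <stdlib.h>

struct buf { void *data; size_t len; };

static int link_ready;                  /* stands in for the driver's state checks */

static int send_cb(struct buf *b)
{
	if (!link_ready) {              /* early error: the buffer is still ours to free */
		free(b->data);
		free(b);
		return -1;
	}

	/* ... hand the buffer to the hardware; the happy path consumes it too ... */
	free(b->data);
	free(b);
	return 0;
}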
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 7764b1a4c3cf..ec87dd21e054 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -312,6 +312,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
int r = 0;
struct device *dev = &ndev->nfc_dev->dev;
struct nfc_evt_transaction *transaction;
+ u32 aid_len;
+ u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -325,26 +327,47 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
+ *
+ * The key differences are aid storage length is variably sized
+ * in the packet, but fixed in nfc_evt_transaction, and that
+ * the aid_len is u8 in the packet, but u32 in the structure,
+ * and the tags in the packet are not included in
+ * nfc_evt_transaction.
+ *
+ * size(b): 1 1 5-16 1 1 0-255
+ * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
+ * mem name: aid_tag(M) aid_len aid params_tag(M) params_len params
+ * example: 0x81 5-16 X 0x82 0-255 X
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
- skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
- if (!transaction)
- return -ENOMEM;
+ aid_len = skb->data[1];
- transaction->aid_len = skb->data[1];
- memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+ if (skb->len < aid_len + 4 ||
+ aid_len > sizeof(transaction->aid))
+ return -EPROTO;
- /* Check next byte is PARAMETERS tag (82) */
- if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
+ params_len = skb->data[aid_len + 3];
+
+ /* Verify PARAMETERS tag is (82), and final check that there is
+ * enough space in the packet to read everything.
+ */
+ if (skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG ||
+ skb->len < aid_len + 4 + params_len)
return -EPROTO;
- transaction->params_len = skb->data[transaction->aid_len + 3];
- memcpy(transaction->params, skb->data +
- transaction->aid_len + 4, transaction->params_len);
+ transaction = devm_kzalloc(dev, sizeof(*transaction) +
+ params_len, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
+
+ transaction->aid_len = aid_len;
+ transaction->params_len = params_len;
+
+ memcpy(transaction->aid, &skb->data[2], aid_len);
+ memcpy(transaction->params, &skb->data[aid_len + 4],
+ params_len);
r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
break;
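Editorial note: a standalone sketch of the bounds checks this hunk introduces, written against the packet layout spelled out in the comment above (aid_tag, aid_len, aid, params_tag, params_len, params). The structure and limits here are illustrative; only the order of the checks mirrors the fix.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define AID_TAG     0x81
#define PARAMS_TAG  0x82
#define AID_MAX     16                  /* fixed-size aid field in the event struct */

struct transaction {
	uint32_t aid_len;
	uint8_t  aid[AID_MAX];
	uint8_t  params_len;
	uint8_t  params[255];
};

static int parse_transaction(const uint8_t *data, size_t len,
			     struct transaction *t)
{
	uint32_t aid_len;
	uint8_t params_len;

	if (len < 2 || data[0] != AID_TAG)
		return -1;

	aid_len = data[1];
	if (len < aid_len + 4 || aid_len > sizeof(t->aid))
		return -1;

	params_len = data[aid_len + 3];
	if (data[aid_len + 2] != PARAMS_TAG ||
	    len < (size_t)aid_len + 4 + params_len)
		return -1;

	t->aid_len = aid_len;
	t->params_len = params_len;
	memcpy(t->aid, &data[2], aid_len);
	memcpy(t->params, &data[aid_len + 4], params_len);
	return 0;
}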
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index da55ce45ac70..69e333922bea 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4304,7 +4304,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_unlock(&ns->ctrl->subsys->lock);
/* guarantee not available in head->list */
- synchronize_rcu();
+ synchronize_srcu(&ns->head->srcu);
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 93e2138a8b42..7e025b8948cb 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -174,11 +174,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
struct nvme_ns_head *head = ns->head;
sector_t capacity = get_capacity(head->disk);
int node;
+ int srcu_idx;
+ srcu_idx = srcu_read_lock(&head->srcu);
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (capacity != get_capacity(ns->disk))
clear_bit(NVME_NS_READY, &ns->flags);
}
+ srcu_read_unlock(&head->srcu, srcu_idx);
for_each_node(node)
rcu_assign_pointer(head->current_path[node], NULL);
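Editorial note: the two nvme hunks above pair the writer's grace period with the reader's lock: synchronize_srcu(&s) only waits for readers bracketed by srcu_read_lock()/srcu_read_unlock() on that same srcu_struct, so the list walk now takes the head's SRCU read lock. A minimal kernel-style sketch of that pairing (not the nvme code; names are illustrative):

#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/slab.h>

DEFINE_SRCU(demo_srcu);
static LIST_HEAD(demo_list);

struct demo_entry { struct list_head node; int val; };

static int demo_reader_sum(void)
{
	struct demo_entry *e;
	int idx, sum = 0;

	idx = srcu_read_lock(&demo_srcu);
	list_for_each_entry_rcu(e, &demo_list, node)
		sum += e->val;
	srcu_read_unlock(&demo_srcu, idx);

	return sum;
}

static void demo_remove(struct demo_entry *e)
{
	list_del_rcu(&e->node);
	synchronize_srcu(&demo_srcu);   /* all demo_srcu readers are done with e */
	kfree(e);
}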
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 02b5578773a1..488ad7dabeb8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -797,6 +797,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
if (bv->bv_len > first_prp_len)
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ else
+ cmnd->dptr.prp2 = 0;
return BLK_STS_OK;
}
@@ -3489,6 +3491,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+ { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
@@ -3519,6 +3523,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index c4113b43dbfe..4dcddcf95279 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -45,9 +45,11 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
if (!dhchap_secret)
return -ENOMEM;
if (set_ctrl) {
+ kfree(host->dhchap_ctrl_secret);
host->dhchap_ctrl_secret = strim(dhchap_secret);
host->dhchap_ctrl_key_hash = key_hash;
} else {
+ kfree(host->dhchap_secret);
host->dhchap_secret = strim(dhchap_secret);
host->dhchap_key_hash = key_hash;
}
diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c
index f6732fd216d8..56fc19f092a7 100644
--- a/drivers/nvmem/lan9662-otpc.c
+++ b/drivers/nvmem/lan9662-otpc.c
@@ -36,7 +36,7 @@ struct lan9662_otp {
void __iomem *base;
};
-static bool lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
+static int lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
{
u32 val;
@@ -203,7 +203,7 @@ static int lan9662_otp_probe(struct platform_device *pdev)
}
static const struct of_device_id lan9662_otp_match[] = {
- { .compatible = "microchip,lan9662-otp", },
+ { .compatible = "microchip,lan9662-otpc", },
{ },
};
MODULE_DEVICE_TABLE(of, lan9662_otp_match);
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
index b11c3c974b3d..80cb187f1481 100644
--- a/drivers/nvmem/rmem.c
+++ b/drivers/nvmem/rmem.c
@@ -37,9 +37,9 @@ static int rmem_read(void *context, unsigned int offset,
* but as of Dec 2020 this isn't possible on arm64.
*/
addr = memremap(priv->mem->base, available, MEMREMAP_WB);
- if (IS_ERR(addr)) {
+ if (!addr) {
dev_err(priv->dev, "Failed to remap memory region\n");
- return PTR_ERR(addr);
+ return -ENOMEM;
}
count = memory_read_from_buffer(val, bytes, &off, addr, available);
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index 8e72d1bbd649..4fdbdccebda1 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -135,7 +135,7 @@ static int u_boot_env_parse(struct u_boot_env *priv)
break;
case U_BOOT_FORMAT_REDUNDANT:
crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
data_offset = offsetof(struct u_boot_env_image_redundant, data);
break;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 967f79b59016..134cfc980b70 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -993,8 +993,10 @@ of_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
nargs, index, &of_args);
if (ret < 0)
return ret;
- if (!args)
+ if (!args) {
+ of_node_put(of_args.np);
return 0;
+ }
args->nargs = of_args.args_count;
args->fwnode = of_fwnode_handle(of_args.np);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 7c45927e2131..5784dc20fb38 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port,
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
- const int fifo = FIFO(port);
+ const unsigned long fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ba64284eaf9f..f1ec8931dfbc 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1613,7 +1613,7 @@ out:
}
static u32 hv_compose_msi_req_v1(
- struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
+ struct pci_create_interrupt *int_pkt,
u32 slot, u8 vector, u16 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1632,6 +1632,35 @@ static u32 hv_compose_msi_req_v1(
}
/*
+ * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
+ * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
+ * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
+ * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
+ * not irrelevant because Hyper-V chooses the physical CPU to handle the
+ * interrupts based on the vCPU specified in message sent to the vPCI VSP in
+ * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
+ * but assigning too many vPCI device interrupts to the same pCPU can cause a
+ * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
+ * to spread out the pCPUs that it selects.
+ *
+ * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
+ * to always return the same dummy vCPU, because a second call to
+ * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
+ * new pCPU for the interrupt. But for the multi-MSI case, the second call to
+ * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
+ * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
+ * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
+ * the same pCPU, even though the vCPUs will be spread out by later calls
+ * to hv_irq_unmask(), but that is the best we can do now.
+ *
+ * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
+ * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
+ * enhancement is planned for a future version. With that enhancement, the
+ * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
+ * device will be spread across multiple pCPUs.
+ */
+
+/*
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
* by subsequent retarget in hv_irq_unmask().
*/
@@ -1640,18 +1669,39 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
return cpumask_first_and(affinity, cpu_online_mask);
}
-static u32 hv_compose_msi_req_v2(
- struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
- u32 slot, u8 vector, u16 vector_count)
+/*
+ * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
+ */
+static int hv_compose_multi_msi_req_get_cpu(void)
{
+ static DEFINE_SPINLOCK(multi_msi_cpu_lock);
+
+ /* -1 means starting with CPU 0 */
+ static int cpu_next = -1;
+
+ unsigned long flags;
int cpu;
+ spin_lock_irqsave(&multi_msi_cpu_lock, flags);
+
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
+ false);
+ cpu = cpu_next;
+
+ spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+
+ return cpu;
+}
+
+static u32 hv_compose_msi_req_v2(
+ struct pci_create_interrupt2 *int_pkt, int cpu,
+ u32 slot, u8 vector, u16 vector_count)
+{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
- cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1660,18 +1710,15 @@ static u32 hv_compose_msi_req_v2(
}
static u32 hv_compose_msi_req_v3(
- struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
+ struct pci_create_interrupt3 *int_pkt, int cpu,
u32 slot, u32 vector, u16 vector_count)
{
- int cpu;
-
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
- cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1715,12 +1762,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
+ bool multi_msi;
u64 trans_id;
u32 size;
int ret;
+ int cpu;
+
+ msi_desc = irq_data_get_msi_desc(data);
+ multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
+ msi_desc->nvec_used > 1;
/* Reuse the previous allocation */
- if (data->chip_data) {
+ if (data->chip_data && multi_msi) {
int_desc = data->chip_data;
msg->address_hi = int_desc->address >> 32;
msg->address_lo = int_desc->address & 0xffffffff;
@@ -1728,7 +1781,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return;
}
- msi_desc = irq_data_get_msi_desc(data);
pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
@@ -1738,11 +1790,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!hpdev)
goto return_null_message;
+ /* Free any previous message that might have already been composed. */
+ if (data->chip_data && !multi_msi) {
+ int_desc = data->chip_data;
+ data->chip_data = NULL;
+ hv_int_desc_free(hpdev, int_desc);
+ }
+
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
- if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+ if (multi_msi) {
/*
* If this is not the first MSI of Multi MSI, we already have
* a mapping. Can exit early.
@@ -1767,9 +1826,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
*/
vector = 32;
vector_count = msi_desc->nvec_used;
+ cpu = hv_compose_multi_msi_req_get_cpu();
} else {
vector = hv_msi_get_int_vector(data);
vector_count = 1;
+ cpu = hv_compose_msi_req_get_cpu(dest);
}
/*
@@ -1785,7 +1846,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- dest,
hpdev->desc.win_slot.slot,
(u8)vector,
vector_count);
@@ -1794,7 +1854,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
case PCI_PROTOCOL_VERSION_1_2:
case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- dest,
+ cpu,
hpdev->desc.win_slot.slot,
(u8)vector,
vector_count);
@@ -1802,7 +1862,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
- dest,
+ cpu,
hpdev->desc.win_slot.slot,
vector,
vector_count);
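Editorial note: a standalone model (not the driver code) of the round-robin selection described in the comment block above: a lock-protected cursor walks the set of online CPUs and wraps, so successive multi-MSI requests hand Hyper-V a different dummy vCPU instead of always vCPU0. A plain array and mutex stand in for cpu_online_mask and the spinlock.

#include <pthread.h>

static const int online_cpus[] = { 0, 2, 3, 5 };    /* illustrative online mask */
#define NR_ONLINE (int)(sizeof(online_cpus) / sizeof(online_cpus[0]))

static pthread_mutex_t rr_lock = PTHREAD_MUTEX_INITIALIZER;
static int rr_next = -1;                             /* -1: start with the first CPU */

static int pick_dummy_cpu(void)
{
	int cpu;

	pthread_mutex_lock(&rr_lock);
	rr_next = (rr_next + 1) % NR_ONLINE;         /* wrap like cpumask_next_wrap() */
	cpu = online_cpus[rr_next];
	pthread_mutex_unlock(&rr_lock);

	return cpu;
}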
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index ef898ee8ca6b..6e0a40962f38 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+ if (!propname)
+ return -ENOMEM;
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 52ecd66ce357..047a8374b4fd 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -436,9 +436,14 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
writel(value, padcfg0);
}
+static int __intel_gpio_get_gpio_mode(u32 value)
+{
+ return (value & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+}
+
static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
{
- return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+ return __intel_gpio_get_gpio_mode(readl(padcfg0));
}
static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
@@ -1674,6 +1679,7 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+ u32 value;
if (!pd || !intel_pad_usable(pctrl, pin))
return false;
@@ -1688,6 +1694,25 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
gpiochip_line_is_irq(&pctrl->chip, intel_pin_to_gpio(pctrl, pin)))
return true;
+ /*
+ * The firmware on some systems may configure GPIO pins to be
+ * an interrupt source in so called "direct IRQ" mode. In such
+ * cases the GPIO controller driver has no idea if those pins
+ * are being used or not. At the same time, there is a known bug
+ * in the firmwares that don't restore the pin settings correctly
+ * after suspend, i.e. by an unknown reason the Rx value becomes
+ * inverted.
+ *
+ * Hence, let's save and restore the pins that are configured
+ * as GPIOs in the input mode with GPIROUTIOXAPIC bit set.
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
+ */
+ value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
+ if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+ return true;
+
return false;
}
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index f7b54a551764..27f0a54e12bf 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -24,6 +24,7 @@
#define MTK_EINT_EDGE_SENSITIVE 0
#define MTK_EINT_LEVEL_SENSITIVE 1
#define MTK_EINT_DBNC_SET_DBNC_BITS 4
+#define MTK_EINT_DBNC_MAX 16
#define MTK_EINT_DBNC_RST_BIT (0x1 << 1)
#define MTK_EINT_DBNC_SET_EN (0x1 << 0)
@@ -48,6 +49,21 @@ static const struct mtk_eint_regs mtk_generic_eint_regs = {
.dbnc_clr = 0x700,
};
+const unsigned int debounce_time_mt2701[] = {
+ 500, 1000, 16000, 32000, 64000, 128000, 256000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt2701);
+
+const unsigned int debounce_time_mt6765[] = {
+ 125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt6765);
+
+const unsigned int debounce_time_mt6795[] = {
+ 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt6795);
+
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
unsigned int eint_num,
unsigned int offset)
@@ -287,12 +303,15 @@ static struct irq_chip mtk_eint_irq_chip = {
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
- void __iomem *reg = eint->base + eint->regs->dom_en;
+ void __iomem *dom_en = eint->base + eint->regs->dom_en;
+ void __iomem *mask_set = eint->base + eint->regs->mask_set;
unsigned int i;
for (i = 0; i < eint->hw->ap_num; i += 32) {
- writel(0xffffffff, reg);
- reg += 4;
+ writel(0xffffffff, dom_en);
+ writel(0xffffffff, mask_set);
+ dom_en += 4;
+ mask_set += 4;
}
return 0;
@@ -404,10 +423,11 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
int virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
dbnc;
- static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
- 64000, 128000, 256000};
struct irq_data *d;
+ if (!eint->hw->db_time)
+ return -EOPNOTSUPP;
+
virq = irq_find_mapping(eint->domain, eint_num);
eint_offset = (eint_num % 4) * 8;
d = irq_get_irq_data(virq);
@@ -418,9 +438,9 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
if (!mtk_eint_can_en_debounce(eint, eint_num))
return -EINVAL;
- dbnc = ARRAY_SIZE(debounce_time);
- for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
- if (debounce <= debounce_time[i]) {
+ dbnc = eint->num_db_time;
+ for (i = 0; i < eint->num_db_time; i++) {
+ if (debounce <= eint->hw->db_time[i]) {
dbnc = i;
break;
}
@@ -494,6 +514,13 @@ int mtk_eint_do_init(struct mtk_eint *eint)
if (!eint->domain)
return -ENOMEM;
+ if (eint->hw->db_time) {
+ for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
+ if (eint->hw->db_time[i] == 0)
+ break;
+ eint->num_db_time = i;
+ }
+
mtk_eint_hw_init(eint);
for (i = 0; i < eint->hw->ap_num; i++) {
int virq = irq_create_mapping(eint->domain, i);
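Editorial note: a standalone sketch of the table lookup these hunks switch to: each SoC provides a zero-terminated list of supported debounce times (in µs), its length is counted once at init, and a request maps to the first entry at least as long as the requested time. Table values and names below are illustrative.

#include <stddef.h>

#define DBNC_MAX 16

static const unsigned int db_table[] = { 500, 1000, 16000, 32000, 0 };

static size_t count_db_entries(const unsigned int *table)
{
	size_t n;

	for (n = 0; n < DBNC_MAX; n++)
		if (table[n] == 0)
			break;
	return n;
}

/* Returns the register index to program, or -1 if no entry is long enough. */
static int pick_debounce_index(const unsigned int *table, size_t n,
			       unsigned int requested_us)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (requested_us <= table[i])
			return (int)i;
	return -1;
}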
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
index 48468d0fae68..6139b16cd225 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.h
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -37,8 +37,13 @@ struct mtk_eint_hw {
u8 ports;
unsigned int ap_num;
unsigned int db_cnt;
+ const unsigned int *db_time;
};
+extern const unsigned int debounce_time_mt2701[];
+extern const unsigned int debounce_time_mt6765[];
+extern const unsigned int debounce_time_mt6795[];
+
struct mtk_eint;
struct mtk_eint_xt {
@@ -62,6 +67,7 @@ struct mtk_eint {
/* Used to fit into various EINT device */
const struct mtk_eint_hw *hw;
const struct mtk_eint_regs *regs;
+ u16 num_db_time;
/* Used to fit into various pinctrl device */
void *pctl;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
index d1583b4fdd9d..b185538452a0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -518,6 +518,7 @@ static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
.ports = 6,
.ap_num = 169,
.db_cnt = 16,
+ .db_time = debounce_time_mt2701,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2712.c b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
index b921068f9e69..730a496848dc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2712.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
@@ -567,6 +567,7 @@ static const struct mtk_pinctrl_devdata mt2712_pinctrl_data = {
.ports = 8,
.ap_num = 229,
.db_cnt = 40,
+ .db_time = debounce_time_mt2701,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index c57b19fcda03..f6ec41eb6e0c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -1062,6 +1062,7 @@ static const struct mtk_eint_hw mt6765_eint_hw = {
.ports = 6,
.ap_num = 160,
.db_cnt = 13,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt6765_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6779.c b/drivers/pinctrl/mediatek/pinctrl-mt6779.c
index 4ddf8bda6827..62d4f5ad6737 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6779.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6779.c
@@ -737,6 +737,7 @@ static const struct mtk_eint_hw mt6779_eint_hw = {
.ports = 6,
.ap_num = 195,
.db_cnt = 13,
+ .db_time = debounce_time_mt2701,
};
static const struct mtk_pin_soc mt6779_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
index f90152261a0f..01e855ccd4dd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6795.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
@@ -475,6 +475,7 @@ static const struct mtk_eint_hw mt6795_eint_hw = {
.ports = 7,
.ap_num = 224,
.db_cnt = 32,
+ .db_time = debounce_time_mt6795,
};
static const unsigned int mt6795_pull_type[] = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 68eee881ee3d..3c1148d59eff 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -846,6 +846,7 @@ static const struct mtk_eint_hw mt7622_eint_hw = {
.ports = 7,
.ap_num = ARRAY_SIZE(mt7622_pins),
.db_cnt = 20,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt7622_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index b8d9d31db74f..699977074697 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1369,6 +1369,7 @@ static const struct mtk_eint_hw mt7623_eint_hw = {
.ports = 6,
.ap_num = 169,
.db_cnt = 20,
+ .db_time = debounce_time_mt2701,
};
static struct mtk_pin_soc mt7623_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7629.c b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
index b5f0fa43245f..2ce411cb9c6e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7629.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
@@ -402,6 +402,7 @@ static const struct mtk_eint_hw mt7629_eint_hw = {
.ports = 7,
.ap_num = ARRAY_SIZE(mt7629_pins),
.db_cnt = 16,
+ .db_time = debounce_time_mt2701,
};
static struct mtk_pin_soc mt7629_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index f26869f1a367..50cb736f9f11 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -826,6 +826,7 @@ static const struct mtk_eint_hw mt7986a_eint_hw = {
.ports = 7,
.ap_num = ARRAY_SIZE(mt7986a_pins),
.db_cnt = 16,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_eint_hw mt7986b_eint_hw = {
@@ -833,6 +834,7 @@ static const struct mtk_eint_hw mt7986b_eint_hw = {
.ports = 7,
.ap_num = ARRAY_SIZE(mt7986b_pins),
.db_cnt = 16,
+ .db_time = debounce_time_mt6765,
};
static struct mtk_pin_soc mt7986a_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
index 91c530e7b00e..e8772dcfe69e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
@@ -286,6 +286,7 @@ static const struct mtk_pinctrl_devdata mt8127_pinctrl_data = {
.ports = 6,
.ap_num = 143,
.db_cnt = 16,
+ .db_time = debounce_time_mt2701,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
index 562846756517..cdb0252071fb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -315,6 +315,7 @@ static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = {
.ports = 6,
.ap_num = 192,
.db_cnt = 16,
+ .db_time = debounce_time_mt2701,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8167.c b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
index 825167f5d020..866da2c4a890 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8167.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
@@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8167_pinctrl_data = {
.ports = 6,
.ap_num = 169,
.db_cnt = 64,
+ .db_time = debounce_time_mt6795,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index 1d7d11a32e7d..37d8cec1c3ce 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -327,6 +327,7 @@ static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = {
.ports = 6,
.ap_num = 224,
.db_cnt = 16,
+ .db_time = debounce_time_mt2701,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
index fecb1e64fff4..ddc48b725c22 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -545,6 +545,7 @@ static const struct mtk_eint_hw mt8183_eint_hw = {
.ports = 6,
.ap_num = 212,
.db_cnt = 13,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt8183_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
index a4dd5197abc1..a02f7c326970 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
@@ -1222,6 +1222,7 @@ static const struct mtk_eint_hw mt8186_eint_hw = {
.ports = 7,
.ap_num = 217,
.db_cnt = 32,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt8186_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
index d0e75c1b4417..6a3d0126288e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8188.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
@@ -1625,6 +1625,7 @@ static const struct mtk_eint_hw mt8188_eint_hw = {
.ports = 7,
.ap_num = 225,
.db_cnt = 32,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt8188_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index 78c02b7c81f0..9695f4ec6aba 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1371,6 +1371,7 @@ static const struct mtk_eint_hw mt8192_eint_hw = {
.ports = 7,
.ap_num = 224,
.db_cnt = 32,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 563693d3d4c2..89557c7ed2ab 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -935,6 +935,7 @@ static const struct mtk_eint_hw mt8195_eint_hw = {
.ports = 7,
.ap_num = 225,
.db_cnt = 32,
+ .db_time = debounce_time_mt6765,
};
static const struct mtk_pin_soc mt8195_data = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index 57f37a294063..e31b89b226b7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -453,6 +453,7 @@ static const struct mtk_pinctrl_devdata mt8365_pinctrl_data = {
.ports = 5,
.ap_num = 160,
.db_cnt = 160,
+ .db_time = debounce_time_mt6765,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8516.c b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
index 939a1932b8dc..e929339dd2cb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8516.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
@@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8516_pinctrl_data = {
.ports = 6,
.ap_num = 169,
.db_cnt = 64,
+ .db_time = debounce_time_mt6795,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index e1ae3beb9f72..b7921b59eb7b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -709,6 +709,9 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw,
{
int err, rsel_val;
+ if (!pullup && arg == MTK_DISABLE)
+ return 0;
+
if (hw->rsel_si_unit) {
/* find pin rsel_index from pin_rsel array*/
err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 53bdfc40f055..da974ff2d75d 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -679,14 +679,54 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
}
static struct rockchip_mux_route_data px30_mux_route_data[] = {
+ RK_MUXROUTE_SAME(2, RK_PB4, 1, 0x184, BIT(16 + 7)), /* cif-d0m0 */
+ RK_MUXROUTE_SAME(3, RK_PA1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d0m1 */
+ RK_MUXROUTE_SAME(2, RK_PB6, 1, 0x184, BIT(16 + 7)), /* cif-d1m0 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d1m1 */
RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */
RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */
+ RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x184, BIT(16 + 7)), /* cif-d3m0 */
+ RK_MUXROUTE_SAME(3, RK_PA5, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d3m1 */
+ RK_MUXROUTE_SAME(2, RK_PA2, 1, 0x184, BIT(16 + 7)), /* cif-d4m0 */
+ RK_MUXROUTE_SAME(3, RK_PA7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d4m1 */
+ RK_MUXROUTE_SAME(2, RK_PA3, 1, 0x184, BIT(16 + 7)), /* cif-d5m0 */
+ RK_MUXROUTE_SAME(3, RK_PB0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d5m1 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 1, 0x184, BIT(16 + 7)), /* cif-d6m0 */
+ RK_MUXROUTE_SAME(3, RK_PB1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d6m1 */
+ RK_MUXROUTE_SAME(2, RK_PA5, 1, 0x184, BIT(16 + 7)), /* cif-d7m0 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d7m1 */
+ RK_MUXROUTE_SAME(2, RK_PA6, 1, 0x184, BIT(16 + 7)), /* cif-d8m0 */
+ RK_MUXROUTE_SAME(3, RK_PB6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d8m1 */
+ RK_MUXROUTE_SAME(2, RK_PA7, 1, 0x184, BIT(16 + 7)), /* cif-d9m0 */
+ RK_MUXROUTE_SAME(3, RK_PB7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d9m1 */
+ RK_MUXROUTE_SAME(2, RK_PB7, 1, 0x184, BIT(16 + 7)), /* cif-d10m0 */
+ RK_MUXROUTE_SAME(3, RK_PC6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d10m1 */
+ RK_MUXROUTE_SAME(2, RK_PC0, 1, 0x184, BIT(16 + 7)), /* cif-d11m0 */
+ RK_MUXROUTE_SAME(3, RK_PC7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d11m1 */
+ RK_MUXROUTE_SAME(2, RK_PB0, 1, 0x184, BIT(16 + 7)), /* cif-vsyncm0 */
+ RK_MUXROUTE_SAME(3, RK_PD1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-vsyncm1 */
+ RK_MUXROUTE_SAME(2, RK_PB1, 1, 0x184, BIT(16 + 7)), /* cif-hrefm0 */
+ RK_MUXROUTE_SAME(3, RK_PD2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-hrefm1 */
+ RK_MUXROUTE_SAME(2, RK_PB2, 1, 0x184, BIT(16 + 7)), /* cif-clkinm0 */
+ RK_MUXROUTE_SAME(3, RK_PD3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkinm1 */
+ RK_MUXROUTE_SAME(2, RK_PB3, 1, 0x184, BIT(16 + 7)), /* cif-clkoutm0 */
+ RK_MUXROUTE_SAME(3, RK_PD0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkoutm1 */
RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */
RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */
+ RK_MUXROUTE_SAME(3, RK_PD3, 2, 0x184, BIT(16 + 8)), /* pdm-sdi0m0 */
+ RK_MUXROUTE_SAME(2, RK_PC5, 2, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-sdi0m1 */
RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */
RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */
+ RK_MUXROUTE_SAME(1, RK_PD2, 2, 0x184, BIT(16 + 10)), /* uart2-txm0 */
+ RK_MUXROUTE_SAME(2, RK_PB4, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-txm1 */
RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */
RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
+ RK_MUXROUTE_SAME(0, RK_PC0, 2, 0x184, BIT(16 + 9)), /* uart3-txm0 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-txm1 */
+ RK_MUXROUTE_SAME(0, RK_PC2, 2, 0x184, BIT(16 + 9)), /* uart3-ctsm0 */
+ RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-ctsm1 */
+ RK_MUXROUTE_SAME(0, RK_PC3, 2, 0x184, BIT(16 + 9)), /* uart3-rtsm0 */
+ RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rtsm1 */
};
static struct rockchip_mux_route_data rv1126_mux_route_data[] = {
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 67bec7ea0f8b..414ee6bb8ac9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -727,7 +727,7 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
mux_bytes = pcs->width / BITS_PER_BYTE;
- if (pcs->bits_per_mux) {
+ if (pcs->bits_per_mux && pcs->fmask) {
pcs->bits_per_pin = fls(pcs->fmask);
nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin;
} else {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index aa2075390f3e..e96c00686a25 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1873,8 +1873,8 @@ static const struct msm_pingroup sc8280xp_groups[] = {
[225] = PINGROUP(225, hs3_mi2s, phase_flag, _, _, _, _, egpio),
[226] = PINGROUP(226, hs3_mi2s, phase_flag, _, _, _, _, egpio),
[227] = PINGROUP(227, hs3_mi2s, phase_flag, _, _, _, _, egpio),
- [228] = UFS_RESET(ufs_reset, 0xf1004),
- [229] = UFS_RESET(ufs1_reset, 0xf3004),
+ [228] = UFS_RESET(ufs_reset, 0xf1000),
+ [229] = UFS_RESET(ufs1_reset, 0xf3000),
[230] = SDC_QDSD_PINGROUP(sdc2_clk, 0xe8000, 14, 6),
[231] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xe8000, 11, 3),
[232] = SDC_QDSD_PINGROUP(sdc2_data, 0xe8000, 9, 0),
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 6748fe4ac5d5..def8d7ac541f 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -1596,16 +1596,32 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
ssh_ptl_tx_wakeup_packet(ptl);
}
-static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
{
int i;
/*
+ * Ignore unsequenced packets. On some devices (notably Surface Pro 9),
+ * unsequenced events will always be sent with SEQ=0x00. Attempting to
+ * detect retransmission would thus just block all events.
+ *
+ * While sequence numbers would also allow detection of retransmitted
+ * packets in unsequenced communication, they have only ever been used
+ * to cover edge-cases in sequenced transmission. In particular, the
+ * only instance of packets being retransmitted (that we are aware of)
+ * is due to an ACK timeout. As this does not happen in unsequenced
+ * communication, skip the retransmission check for those packets
+ * entirely.
+ */
+ if (frame->type == SSH_FRAME_TYPE_DATA_NSQ)
+ return false;
+
+ /*
* Check if SEQ has been seen recently (i.e. packet was
* re-transmitted and we should ignore it).
*/
for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
- if (likely(ptl->rx.blocked.seqs[i] != seq))
+ if (likely(ptl->rx.blocked.seqs[i] != frame->seq))
continue;
ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
@@ -1613,7 +1629,7 @@ static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
}
/* Update list of blocked sequence IDs. */
- ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
+ ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq;
ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
% ARRAY_SIZE(ptl->rx.blocked.seqs);
@@ -1624,7 +1640,7 @@ static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
const struct ssh_frame *frame,
const struct ssam_span *payload)
{
- if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
+ if (ssh_ptl_rx_retransmit_check(ptl, frame))
return;
ptl->ops.data_received(ptl, payload);
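Editorial note: a standalone sketch of the duplicate-detection scheme the hunk above adjusts: a small ring of recently seen sequence numbers is consulted for sequenced data frames only, while unsequenced frames (which may always carry SEQ 0) bypass the check so they are never dropped as "retransmissions". Ring size, types, and names are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define BLOCKED_SEQS 8

struct rx_state {
	uint16_t seqs[BLOCKED_SEQS];
	unsigned int offset;
};

/* Returns true if the frame should be ignored as a retransmission. */
static bool seen_recently(struct rx_state *rx, bool sequenced, uint16_t seq)
{
	unsigned int i;

	if (!sequenced)
		return false;           /* never filter unsequenced events */

	for (i = 0; i < BLOCKED_SEQS; i++)
		if (rx->seqs[i] == seq)
			return true;

	rx->seqs[rx->offset] = seq;     /* remember the newest SEQ */
	rx->offset = (rx->offset + 1) % BLOCKED_SEQS;
	return false;
}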
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 585911020cea..023f126121d7 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -234,6 +234,19 @@ static const struct software_node *ssam_node_group_sl3[] = {
NULL,
};
+/* Devices for Surface Laptop 5. */
+static const struct software_node *ssam_node_group_sl5[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_main_keyboard,
+ &ssam_node_hid_main_touchpad,
+ &ssam_node_hid_main_iid5,
+ &ssam_node_hid_sam_ucm_ucsi,
+ NULL,
+};
+
/* Devices for Surface Laptop Studio. */
static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_root,
@@ -268,6 +281,7 @@ static const struct software_node *ssam_node_group_sp7[] = {
NULL,
};
+/* Devices for Surface Pro 8 */
static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_root,
&ssam_node_hub_kip,
@@ -284,6 +298,23 @@ static const struct software_node *ssam_node_group_sp8[] = {
NULL,
};
+/* Devices for Surface Pro 9 */
+static const struct software_node *ssam_node_group_sp9[] = {
+ &ssam_node_root,
+ &ssam_node_hub_kip,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ /* TODO: Tablet mode switch (via POS subsystem) */
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ NULL,
+};
+
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
@@ -303,6 +334,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Pro 8 */
{ "MSHW0263", (unsigned long)ssam_node_group_sp8 },
+ /* Surface Pro 9 */
+ { "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
@@ -324,6 +358,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop 4 (13", Intel) */
{ "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+ /* Surface Laptop 5 */
+ { "MSHW0350", (unsigned long)ssam_node_group_sl5 },
+
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 18224f9a5bc0..ee67efdd5499 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -566,6 +566,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = set_force_caps,
+ .ident = "Acer Aspire Switch V 10 SW5-017",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
.ident = "Acer One 10 (S1003)",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 96e790e639a2..ef4ae977b8e0 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -276,7 +276,6 @@ static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
.release = amd_pmc_stb_debugfs_release_v2,
};
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_DEBUG_FS)
static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
{
if (dev->cpu_id == AMD_CPU_ID_PCO) {
@@ -351,7 +350,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
return 0;
}
-#endif /* CONFIG_SUSPEND || CONFIG_DEBUG_FS */
#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
@@ -964,6 +962,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0006", 0},
{"AMDI0007", 0},
{"AMDI0008", 0},
+ {"AMDI0009", 0},
{"AMD0004", 0},
{"AMD0005", 0},
{ }
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 6e8e093f96b3..872efc1d5b36 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1738,6 +1738,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
cpu_to_le32(ports_available));
+ pci_dev_put(xhci_pdev);
+
pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
orig_ports_available, ports_available);
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 12449038bed1..0a99058be813 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -90,6 +90,7 @@ enum hp_wmi_event_ids {
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
+ HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
/*
@@ -859,6 +860,8 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_SANITIZATION_MODE:
break;
+ case HPWMI_SMART_EXPERIENCE_APP:
+ break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 33b3dfdd1b08..3ea8fc6a9ca3 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -136,6 +136,7 @@ struct ideapad_private {
bool dytc : 1;
bool fan_mode : 1;
bool fn_lock : 1;
+ bool set_fn_lock_led : 1;
bool hw_rfkill_switch : 1;
bool kbd_bl : 1;
bool touchpad_ctrl_via_ec : 1;
@@ -154,7 +155,21 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
static bool allow_v4_dytc;
module_param(allow_v4_dytc, bool, 0444);
-MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support.");
+MODULE_PARM_DESC(allow_v4_dytc,
+ "Enable DYTC version 4 platform-profile support. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool hw_rfkill_switch;
+module_param(hw_rfkill_switch, bool, 0444);
+MODULE_PARM_DESC(hw_rfkill_switch,
+ "Enable rfkill support for laptops with a hw on/off wifi switch/slider. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool set_fn_lock_led;
+module_param(set_fn_lock_led, bool, 0444);
+MODULE_PARM_DESC(set_fn_lock_led,
+ "Enable driver based updates of the fn-lock LED on fn-lock changes. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
/*
* ACPI Helpers
@@ -1501,6 +1516,9 @@ static void ideapad_wmi_notify(u32 value, void *context)
ideapad_input_report(priv, value);
break;
case 208:
+ if (!priv->features.set_fn_lock_led)
+ break;
+
if (!eval_hals(priv->adev->handle, &result)) {
bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
@@ -1514,6 +1532,18 @@ static void ideapad_wmi_notify(u32 value, void *context)
}
#endif
+/* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */
+static const struct dmi_system_id set_fn_lock_led_list[] = {
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=212671 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
+ }
+ },
+ {}
+};
+
/*
* Some ideapads have a hardware rfkill switch, but most do not have one.
* Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill,
@@ -1556,7 +1586,10 @@ static void ideapad_check_features(struct ideapad_private *priv)
acpi_handle handle = priv->adev->handle;
unsigned long val;
- priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+ priv->features.set_fn_lock_led =
+ set_fn_lock_led || dmi_check_system(set_fn_lock_led_list);
+ priv->features.hw_rfkill_switch =
+ hw_rfkill_switch || dmi_check_system(hw_rfkill_list);
/* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
if (acpi_dev_present("ELAN0634", NULL, -1))
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index 15ca8afdd973..ddfba38c2104 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -18,6 +18,8 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <xen/xen.h>
+
static void intel_pmc_core_release(struct device *dev)
{
kfree(dev);
@@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void)
if (acpi_dev_present("INT33A1", NULL, -1))
return -ENODEV;
+ /*
+ * Skip forcefully attaching the device for VMs. Make an exception for
+ * Xen dom0, which does have full hardware access.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+ return -ENODEV;
+
if (!x86_match_cpu(intel_pmc_core_platform_ids))
return -ENODEV;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 20e5c043a8e8..8476dfef4e62 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4497,6 +4497,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
}
},
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+ }
+ },
{}
};
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 863fabe05bdc..307ee6f71042 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -725,7 +725,14 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Get thermal zone and ADC */
di->tz = thermal_zone_get_zone_by_name("battery-thermal");
if (IS_ERR(di->tz)) {
- return dev_err_probe(dev, PTR_ERR(di->tz),
+ ret = PTR_ERR(di->tz);
+ /*
+ * This usually just means we are probing before the thermal
+ * zone, so just defer.
+ */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+ return dev_err_probe(dev, ret,
"failed to get battery thermal zone\n");
}
di->bat_ctrl = devm_iio_channel_get(dev, "bat_ctrl");
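
Translating -ENODEV from thermal_zone_get_zone_by_name() into -EPROBE_DEFER asks the driver core to retry the probe after the thermal zone has registered instead of failing for good; dev_err_probe() also stays quiet for -EPROBE_DEFER and records the deferral reason. A generic sketch of that defer-on-missing-dependency pattern (hypothetical function name, not the ab8500 code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/thermal.h>

static int example_get_battery_tz(struct device *dev,
				  struct thermal_zone_device **tz)
{
	*tz = thermal_zone_get_zone_by_name("battery-thermal");
	if (IS_ERR(*tz)) {
		int ret = PTR_ERR(*tz);

		/* Zone not registered yet: let the driver core retry later. */
		if (ret == -ENODEV)
			ret = -EPROBE_DEFER;
		return dev_err_probe(dev, ret, "battery thermal zone not ready\n");
	}

	return 0;
}
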
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index 218e8e689a3f..00221e9c0bfc 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -352,7 +352,7 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATIADC_DAT0,
IP5XXX_BATIADC_DAT1, &raw);
- val->intval = DIV_ROUND_CLOSEST(raw * 745985, 1000);
+ val->intval = DIV_ROUND_CLOSEST(raw * 149197, 200);
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
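
The replaced constants encode the same scale factor, since 745985 / 1000 = 149197 / 200 = 745.985, and with the even denominator DIV_ROUND_CLOSEST() rounds both forms identically; dividing numerator and denominator by 5 only shrinks the intermediate product raw * multiplier, presumably to keep it within 32-bit range for large ADC readings. A small userspace check of the equivalence (simplified DIV_ROUND_CLOSEST for non-negative operands, matching the kernel definition in that case):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))

int main(void)
{
	long raw;

	for (raw = 0; raw <= 0xffff; raw++) {
		long old = DIV_ROUND_CLOSEST(raw * 745985L, 1000L);
		long new = DIV_ROUND_CLOSEST(raw * 149197L, 200L);

		if (old != new) {
			printf("mismatch at raw=%ld\n", raw);
			return 1;
		}
	}
	printf("identical for all 16-bit raw values\n");
	return 0;
}
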
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 635f051b0821..f20a6ac584cc 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -121,7 +121,7 @@ struct rk817_charger {
#define ADC_TO_CHARGE_UAH(adc_value, res_div) \
(adc_value / 3600 * 172 / res_div)
-static u8 rk817_chg_cur_to_reg(u32 chg_cur_ma)
+static int rk817_chg_cur_to_reg(u32 chg_cur_ma)
{
if (chg_cur_ma >= 3500)
return CHG_3_5A;
@@ -864,8 +864,8 @@ static int rk817_battery_init(struct rk817_charger *charger,
{
struct rk808 *rk808 = charger->rk808;
u32 tmp, max_chg_vol_mv, max_chg_cur_ma;
- u8 max_chg_vol_reg, chg_term_i_reg, max_chg_cur_reg;
- int ret, chg_term_ma;
+ u8 max_chg_vol_reg, chg_term_i_reg;
+ int ret, chg_term_ma, max_chg_cur_reg;
u8 bulk_reg[2];
/* Get initial plug state */
@@ -1116,14 +1116,12 @@ static int rk817_charger_probe(struct platform_device *pdev)
charger->bat_ps = devm_power_supply_register(&pdev->dev,
&rk817_bat_desc, &pscfg);
-
- charger->chg_ps = devm_power_supply_register(&pdev->dev,
- &rk817_chg_desc, &pscfg);
-
- if (IS_ERR(charger->chg_ps))
+ if (IS_ERR(charger->bat_ps))
return dev_err_probe(dev, -EINVAL,
"Battery failed to probe\n");
+ charger->chg_ps = devm_power_supply_register(&pdev->dev,
+ &rk817_chg_desc, &pscfg);
if (IS_ERR(charger->chg_ps))
return dev_err_probe(dev, -EINVAL,
"Charger failed to probe\n");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bcccad8f7516..e8c00a884f1f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5154,6 +5154,7 @@ static void regulator_dev_release(struct device *dev)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
+ debugfs_remove_recursive(rdev->debugfs);
kfree(rdev->constraints);
of_node_put(rdev->dev.of_node);
kfree(rdev);
@@ -5644,11 +5645,15 @@ wash:
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
+ put_device(&rdev->dev);
+ rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
+ if (rdev && rdev->dev.of_node)
+ of_node_put(rdev->dev.of_node);
+ kfree(rdev);
kfree(config);
- put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
@@ -5677,7 +5682,6 @@ void regulator_unregister(struct regulator_dev *rdev)
mutex_lock(&regulator_list_mutex);
- debugfs_remove_recursive(rdev->debugfs);
WARN_ON(rdev->open_count);
regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
diff --git a/drivers/regulator/rt5759-regulator.c b/drivers/regulator/rt5759-regulator.c
index 6b96899eb27e..8488417f4b2c 100644
--- a/drivers/regulator/rt5759-regulator.c
+++ b/drivers/regulator/rt5759-regulator.c
@@ -243,6 +243,7 @@ static int rt5759_regulator_register(struct rt5759_priv *priv)
if (priv->chip_type == CHIP_TYPE_RT5759A)
reg_desc->uV_step = RT5759A_STEP_UV;
+ memset(&reg_cfg, 0, sizeof(reg_cfg));
reg_cfg.dev = priv->dev;
reg_cfg.of_node = np;
reg_cfg.init_data = of_get_regulator_init_data(priv->dev, np, reg_desc);
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 75a941fb3c2b..1b2eee95ad3f 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
chip->cs_gpiod = cs_gpiod;
}
+ usleep_range(10000, 11000);
+
i2c_set_clientdata(client, chip);
chip->chip_irq = client->irq;
chip->dev = dev;
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 430265c404d6..f3856750944f 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -67,6 +67,7 @@ struct twlreg_info {
#define TWL6030_CFG_STATE_SLEEP 0x03
#define TWL6030_CFG_STATE_GRP_SHIFT 5
#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_MASK 0x03
#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
TWL6030_CFG_STATE_APP_SHIFT)
@@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
if (grp < 0)
return grp;
grp &= P1_GRP_6030;
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val = TWL6030_CFG_STATE_APP(val);
} else {
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val &= TWL6030_CFG_STATE_MASK;
grp = 1;
}
- val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- val = TWL6030_CFG_STATE_APP(val);
-
return grp && (val == TWL6030_CFG_STATE_ON);
}
@@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- switch (TWL6030_CFG_STATE_APP(val)) {
+ if (info->features & TWL6032_SUBCLASS)
+ val &= TWL6030_CFG_STATE_MASK;
+ else
+ val = TWL6030_CFG_STATE_APP(val);
+
+ switch (val) {
case TWL6030_CFG_STATE_ON:
return REGULATOR_STATUS_NORMAL;
@@ -530,6 +537,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
@@ -562,6 +570,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index cb83f81da416..df17f0f9cb0f 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1954,7 +1954,7 @@ dasd_copy_pair_show(struct device *dev,
break;
}
}
- if (!copy->entry[i].primary)
+ if (i == DASD_CP_ENTRIES)
goto out;
/* print all secondary */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 662730f3b027..5d0b9991e91a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4722,7 +4722,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct dasd_device *basedev;
struct req_iterator iter;
struct dasd_ccw_req *cqr;
- unsigned int first_offs;
unsigned int trkcount;
unsigned long *idaws;
unsigned int size;
@@ -4756,7 +4755,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
- first_offs = 0;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -4800,13 +4798,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if (use_prefix) {
prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
- startdev, 1, first_offs + 1, trkcount, 0, 0);
+ startdev, 1, 0, trkcount, 0, 0);
} else {
define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data);
- locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+ locate_record_ext(ccw++, data, first_trk, 0,
trkcount, cmd, basedev, 0, 0);
}
@@ -5500,7 +5498,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
* Dump the range of CCWs into 'page' buffer
* and return number of printed chars.
*/
-static int
+static void
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
int len, count;
@@ -5518,16 +5516,21 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
else
datap = (char *) ((addr_t) from->cda);
- /* dump data (max 32 bytes) */
- for (count = 0; count < from->count && count < 32; count++) {
- if (count % 8 == 0) len += sprintf(page + len, " ");
- if (count % 4 == 0) len += sprintf(page + len, " ");
+ /* dump data (max 128 bytes) */
+ for (count = 0; count < from->count && count < 128; count++) {
+ if (count % 32 == 0)
+ len += sprintf(page + len, "\n");
+ if (count % 8 == 0)
+ len += sprintf(page + len, " ");
+ if (count % 4 == 0)
+ len += sprintf(page + len, " ");
len += sprintf(page + len, "%02x", datap[count]);
}
len += sprintf(page + len, "\n");
from++;
}
- return len;
+ if (len > 0)
+ printk(KERN_ERR "%s", page);
}
static void
@@ -5619,37 +5622,33 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (req) {
/* req == NULL for unsolicited interrupts */
/* dump the Channel Program (max 140 Bytes per line) */
- /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
+ /* Count CCW and print first CCWs (maximum 7) */
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- len = sprintf(page, PRINTK_HEADER
- " Related CP in req: %p\n", req);
- dasd_eckd_dump_ccw_range(first, to, page + len);
- printk(KERN_ERR "%s", page);
+ printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
+ dasd_eckd_dump_ccw_range(first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
- len = 0;
from = ++to;
fail = (struct ccw1 *)(addr_t)
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- len += sprintf(page, PRINTK_HEADER "......\n");
+ printk(KERN_ERR PRINTK_HEADER "......\n");
}
to = min(fail + 1, last);
- len += dasd_eckd_dump_ccw_range(from, to, page + len);
+ dasd_eckd_dump_ccw_range(from, to, page + len);
/* print last CCWs (maximum 2) */
+ len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ printk(KERN_ERR PRINTK_HEADER "......\n");
}
- len += dasd_eckd_dump_ccw_range(from, last, page + len);
- if (len > 0)
- printk(KERN_ERR "%s", page);
+ dasd_eckd_dump_ccw_range(from, last, page + len);
}
free_page((unsigned long) page);
}
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index d0ddf2cc9786..9327dcdd6e5e 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -401,7 +401,7 @@ dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
return -EFAULT;
}
if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
- pr_warn("%s: Ivalid swap data specified.\n",
+ pr_warn("%s: Invalid swap data specified\n",
dev_name(&device->cdev->dev));
dasd_put_device(device);
return DASD_COPYPAIRSWAP_INVALID;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 93b80da60277..b392b9f5482e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -636,6 +636,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
+ dev_info->gd->flags |= GENHD_FL_NO_PART;
blk_queue_logical_block_size(dev_info->gd->queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 59ac98f2bd27..b02c631f3b71 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -233,8 +233,11 @@ static void __init ap_init_qci_info(void)
if (!ap_qci_info)
return;
ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
- if (!ap_qci_info_old)
+ if (!ap_qci_info_old) {
+ kfree(ap_qci_info);
+ ap_qci_info = NULL;
return;
+ }
if (ap_fetch_qci_info(ap_qci_info) != 0) {
kfree(ap_qci_info);
kfree(ap_qci_info_old);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 19223b075568..ab3ea529cca7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -884,7 +884,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- int req_id = req->req_id;
+ unsigned long req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index f77ee4051b00..3306de7170f6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3265,7 +3265,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
}
if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
- (scmd->cmnd[0] != ATA_16)) {
+ (scmd->cmnd[0] != ATA_16) &&
+ mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
scmd->result);
scsi_print_command(scmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 629853662b82..bebda917b138 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7323,8 +7323,12 @@ static int sdebug_add_host_helper(int per_host_idx)
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
- if (error)
+ if (error) {
+ spin_lock(&sdebug_host_list_lock);
+ list_del(&sdbg_host->host_list);
+ spin_unlock(&sdebug_host_list_lock);
goto clean;
+ }
++sdebug_num_hosts;
return 0;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index cd3db9684e52..f473c002fa4d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -231,7 +231,7 @@ iscsi_create_endpoint(int dd_size)
dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_id;
+ goto put_dev;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -245,10 +245,12 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
-free_id:
+put_dev:
mutex_lock(&iscsi_ep_idr_mutex);
idr_remove(&iscsi_ep_idr, id);
mutex_unlock(&iscsi_ep_idr_mutex);
+ put_device(&ep->dev);
+ return NULL;
free_ep:
kfree(ep);
return NULL;
@@ -766,7 +768,7 @@ iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
err = device_register(&iface->dev);
if (err)
- goto free_iface;
+ goto put_dev;
err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
if (err)
@@ -780,9 +782,8 @@ unreg_iface:
device_unregister(&iface->dev);
return NULL;
-free_iface:
- put_device(iface->dev.parent);
- kfree(iface);
+put_dev:
+ put_device(&iface->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_iface);
@@ -1251,15 +1252,15 @@ iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
err = device_register(&fnode_sess->dev);
if (err)
- goto free_fnode_sess;
+ goto put_dev;
if (dd_size)
fnode_sess->dd_data = &fnode_sess[1];
return fnode_sess;
-free_fnode_sess:
- kfree(fnode_sess);
+put_dev:
+ put_device(&fnode_sess->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
@@ -1299,15 +1300,15 @@ iscsi_create_flashnode_conn(struct Scsi_Host *shost,
err = device_register(&fnode_conn->dev);
if (err)
- goto free_fnode_conn;
+ goto put_dev;
if (dd_size)
fnode_conn->dd_data = &fnode_conn[1];
return fnode_conn;
-free_fnode_conn:
- kfree(fnode_conn);
+put_dev:
+ put_device(&fnode_conn->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
@@ -4815,7 +4816,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
dev_set_name(&priv->dev, "%s", tt->name);
err = device_register(&priv->dev);
if (err)
- goto free_priv;
+ goto put_dev;
err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
if (err)
@@ -4850,8 +4851,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
unregister_dev:
device_unregister(&priv->dev);
return NULL;
-free_priv:
- kfree(priv);
+put_dev:
+ put_device(&priv->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
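
All four hunks in this file apply the same rule: once device_register() (or device_initialize()) has run, the object is owned by its embedded kobject, so a failed registration must be unwound with put_device(), which ends up invoking the release callback, and never with a direct kfree(). A condensed sketch of the rule with hypothetical structure names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
	struct device dev;
};

static void example_release(struct device *dev)
{
	kfree(container_of(dev, struct example_obj, dev));
}

static struct example_obj *example_create(struct device *parent)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	obj->dev.parent = parent;
	obj->dev.release = example_release;
	dev_set_name(&obj->dev, "example0");

	if (device_register(&obj->dev)) {
		/*
		 * Never kfree() here: device_register() already initialized
		 * the kobject, so dropping the last reference is what frees
		 * the object through example_release().
		 */
		put_device(&obj->dev);
		return NULL;
	}

	return obj;
}
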
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index bc46721aa01c..3c5b7e4227b2 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -303,16 +303,21 @@ enum storvsc_request_type {
};
/*
- * SRB status codes and masks; a subset of the codes used here.
+ * SRB status codes and masks. In the 8-bit field, the two high order bits
+ * are flags, while the remaining 6 bits are an integer status code. The
+ * definitions here include only the subset of the integer status codes that
+ * are tested for in this driver.
*/
-
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_QUEUE_FROZEN 0x40
-#define SRB_STATUS_INVALID_LUN 0x20
-#define SRB_STATUS_SUCCESS 0x01
-#define SRB_STATUS_ABORTED 0x02
-#define SRB_STATUS_ERROR 0x04
-#define SRB_STATUS_DATA_OVERRUN 0x12
+
+/* SRB status integer codes */
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
+#define SRB_STATUS_ERROR 0x04
+#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_DATA_OVERRUN 0x12
+#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS(status) \
(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
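
The rewritten comment describes srb_status as two flag bits (autosense valid, queue frozen) carried on top of a 6-bit integer code, and SRB_STATUS() masks the flags off before the switch added further down. For instance, a status byte of 0x84 is SRB_STATUS_AUTOSENSE_VALID plus SRB_STATUS_ERROR, so SRB_STATUS(0x84) yields 0x04. A tiny standalone check using the same values:

#include <assert.h>

#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_QUEUE_FROZEN		0x40
#define SRB_STATUS_ERROR		0x04
#define SRB_STATUS(status) \
	((status) & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))

int main(void)
{
	/* Flag bits set on top of the integer code are masked off. */
	assert(SRB_STATUS(0x84) == SRB_STATUS_ERROR);
	assert(SRB_STATUS(0xc4) == SRB_STATUS_ERROR);
	return 0;
}
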
@@ -969,38 +974,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
void (*process_err_fn)(struct work_struct *work);
struct hv_host_device *host_dev = shost_priv(host);
- /*
- * In some situations, Hyper-V sets multiple bits in the
- * srb_status, such as ABORTED and ERROR. So process them
- * individually, with the most specific bits first.
- */
-
- if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
- set_host_byte(scmnd, DID_NO_CONNECT);
- process_err_fn = storvsc_remove_lun;
- goto do_work;
- }
+ switch (SRB_STATUS(vm_srb->srb_status)) {
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_ABORTED:
+ case SRB_STATUS_INVALID_REQUEST:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
+ /* Check for capacity change */
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ process_err_fn = storvsc_device_scan;
+ /* Retry the I/O that triggered this. */
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
- if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
- /* Capacity data has changed */
- (asc == 0x2a) && (ascq == 0x9)) {
- process_err_fn = storvsc_device_scan;
/*
- * Retry the I/O that triggered this.
+ * Otherwise, let upper layer deal with the
+ * error when sense message is present
*/
- set_host_byte(scmnd, DID_REQUEUE);
- goto do_work;
- }
- }
-
- if (vm_srb->srb_status & SRB_STATUS_ERROR) {
- /*
- * Let upper layer deal with error when
- * sense message is present.
- */
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
return;
+ }
/*
* If there is an error; offline the device since all
@@ -1023,6 +1015,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
default:
set_host_byte(scmnd, DID_ERROR);
}
+ return;
+
+ case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
+ process_err_fn = storvsc_remove_lun;
+ goto do_work;
+
}
return;
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 7c4f32d76966..561408583b2b 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -839,6 +839,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster,
err_device_register:
/* don't care to make the buffer smaller again */
+ put_device(&sdevice->dev);
+ sdevice = NULL;
err_buf_alloc:
siox_master_unlock(smaster);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 2ed821f75816..a0fdf9d792cb 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -23,7 +23,7 @@ config SLIM_QCOM_CTRL
config SLIM_QCOM_NGD_CTRL
tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
depends on HAS_IOMEM && DMA_ENGINE && NET
- depends on QCOM_RPROC_COMMON || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (COMPILE_TEST && !QCOM_RPROC_COMMON)
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
select QCOM_PDR_HELPERS
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
index 75f87b3d8b95..73a2aa362957 100644
--- a/drivers/slimbus/stream.c
+++ b/drivers/slimbus/stream.c
@@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = {
384000,
768000,
0, /* Reserved */
- 110250,
- 220500,
- 441000,
- 882000,
+ 11025,
+ 22050,
+ 44100,
+ 88200,
176400,
352800,
705600,
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index cc57a384d74d..28144c699b0c 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
+#include <linux/clk.h>
#define REV_B1 0x21
@@ -56,6 +57,7 @@ static u32 __init imx8mq_soc_revision(void)
void __iomem *ocotp_base;
u32 magic;
u32 rev;
+ struct clk *clk;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
@@ -63,6 +65,13 @@ static u32 __init imx8mq_soc_revision(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (!clk) {
+ WARN_ON(!clk);
+ return 0;
+ }
+
+ clk_prepare_enable(clk);
/*
* SOC revision on older imx8mq is not available in fuses so query
@@ -79,6 +88,8 @@ static u32 __init imx8mq_soc_revision(void)
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index 1322b8cce5b7..ababb910b391 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -128,12 +128,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
dw_spi_dma_sg_burst_init(dws);
+ pci_dev_put(dma_dev);
+
return 0;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
+ pci_dev_put(dma_dev);
return -EBUSY;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 30d82cc7300b..d209930069cf 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -444,8 +444,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int pre, post;
unsigned int fin = spi_imx->spi_clk;
- if (unlikely(fspi > fin))
- return 0;
+ fspi = min(fspi, fin);
post = fls(fin) - fls(fspi);
if (fin > fspi << post)
@@ -1608,6 +1607,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
return spi_imx_pio_transfer_slave(spi, transfer);
/*
+ * If we decided in spi_imx_can_dma() that we want to do a DMA
+ * transfer, the SPI transfer has already been mapped, so we
+ * have to do the DMA transfer here.
+ */
+ if (spi_imx->usedma)
+ return spi_imx_dma_transfer(spi_imx, transfer);
+ /*
* Calculate the estimated time in us the transfer runs. Find
* the number of Hz per byte per polling limit.
*/
@@ -1618,9 +1624,6 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
if (transfer->len < byte_limit)
return spi_imx_poll_transfer(spi, transfer);
- if (spi_imx->usedma)
- return spi_imx_dma_transfer(spi_imx, transfer);
-
return spi_imx_pio_transfer(spi, transfer);
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a33c9a3de395..d6aff909fc36 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1273,8 +1273,11 @@ static int mtk_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;
- pm_runtime_disable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
mtk_spi_reset(mdata);
@@ -1283,6 +1286,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
clk_unprepare(mdata->spi_hclk);
}
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 10f0c5a6e0dc..9f356612ba7e 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -924,8 +924,9 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
- cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
+ cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index f9589c5b62ba..1e5ad3b476ef 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -439,7 +439,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
union iwreq_data *wrqu, char *extra)
{
- int ret = 0, len, i;
+ int ret = 0, len;
short proto_started;
unsigned long flags;
@@ -455,13 +455,6 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
goto out;
}
- for (i = 0; i < len; i++) {
- if (extra[i] < 0) {
- ret = -1;
- goto out;
- }
- }
-
if (proto_started)
rtllib_stop_protocol(ieee, true);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4407b56aa6d1..139031ccb700 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -397,6 +397,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
+ put_device(&tl_hba->dev);
return -ENODEV;
}
@@ -1073,7 +1074,7 @@ check_len:
*/
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
- goto out;
+ return ERR_PTR(ret);
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index f3947be13e2e..64f0e047c23d 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -80,7 +80,7 @@ static int optee_register_device(const uuid_t *device_uuid)
rc = device_register(&optee_device->dev);
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
- kfree(optee_device);
+ put_device(&optee_device->dev);
}
return rc;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5e516f5cac5a..b6e0cc4571ea 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -264,7 +264,7 @@ struct gsm_mux {
bool constipated; /* Asked by remote to shut up */
bool has_devices; /* Devices were registered */
- struct mutex tx_mutex;
+ spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
@@ -272,7 +272,7 @@ struct gsm_mux {
struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
- struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
+ struct timer_list kick_timer; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -700,6 +700,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
struct gsm_msg *msg;
u8 *dp;
int ocr;
+ unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control);
if (!msg)
@@ -721,10 +722,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
gsm->tx_bytes += msg->len;
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
gsmld_write_trigger(gsm);
return 0;
@@ -749,7 +750,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
if (msg->addr != addr)
continue;
@@ -757,7 +758,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
list_del(&msg->list);
kfree(msg);
}
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
@@ -1028,7 +1029,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm);
- schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
+ mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
}
/**
@@ -1043,9 +1044,10 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- mutex_lock(&dlci->gsm->tx_mutex);
+ unsigned long flags;
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
__gsm_data_queue(dlci, msg);
- mutex_unlock(&dlci->gsm->tx_mutex);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
/**
@@ -1057,7 +1059,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_mutex of the mux.
+ * Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
@@ -1117,7 +1119,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_mutex of the mux.
+ * Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -1133,7 +1135,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_mutex */
+ /* dlci->skb is locked by tx_lock */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -1187,7 +1189,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
- * Caller must hold the tx_mutex of the mux.
+ * Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1301,12 +1303,13 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
+ unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- mutex_lock(&dlci->gsm->tx_mutex);
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -1317,7 +1320,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- mutex_unlock(&dlci->gsm->tx_mutex);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
/*
@@ -1708,7 +1711,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
unsigned int command, u8 *data, int clen)
{
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
- GFP_KERNEL);
+ GFP_ATOMIC);
unsigned long flags;
if (ctrl == NULL)
return NULL;
@@ -2019,23 +2022,24 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
/**
- * gsm_kick_timeout - transmit if possible
- * @work: work contained in our gsm object
+ * gsm_kick_timer - transmit if possible
+ * @t: timer contained in our gsm object
*
* Transmit data from DLCIs if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
-static void gsm_kick_timeout(struct work_struct *work)
+static void gsm_kick_timer(struct timer_list *t)
{
- struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
+ struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+ unsigned long flags;
int sent = 0;
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
/* If we have nothing running then we need to fire up */
if (gsm->tx_bytes < TX_THRESH_LO)
sent = gsm_dlci_data_sweep(gsm);
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (sent && debug & DBG_DATA)
pr_info("%s TX queue stalled\n", __func__);
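
This run of hunks moves the TX path from a delayed work item and mutex back to a timer_list and spinlock, since the kick now runs in timer (softirq) context where sleeping locks are not allowed; from_timer() recovers the containing gsm_mux from the timer_list member. A minimal sketch of that timer pattern with a hypothetical structure:

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct example_mux {
	spinlock_t tx_lock;
	struct timer_list kick_timer;
};

static void example_kick_timer(struct timer_list *t)
{
	struct example_mux *mux = from_timer(mux, t, kick_timer);
	unsigned long flags;

	/* Runs in softirq context: a sleeping mutex is not allowed here. */
	spin_lock_irqsave(&mux->tx_lock, flags);
	/* ... push any queued frames out ... */
	spin_unlock_irqrestore(&mux->tx_lock, flags);
}

static void example_mux_init(struct example_mux *mux)
{
	spin_lock_init(&mux->tx_lock);
	timer_setup(&mux->kick_timer, example_kick_timer, 0);
	/* Re-armed whenever data is queued, e.g.: */
	mod_timer(&mux->kick_timer, jiffies + HZ / 10);
}
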
@@ -2492,7 +2496,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
- cancel_delayed_work_sync(&gsm->kick_timeout);
+ del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
/* Finish writing to ldisc */
@@ -2565,7 +2569,6 @@ static void gsm_free_mux(struct gsm_mux *gsm)
break;
}
}
- mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2637,15 +2640,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
- mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
INIT_LIST_HEAD(&gsm->tx_data_list);
- INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
+ spin_lock_init(&gsm->tx_lock);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2670,7 +2673,6 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
- mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2826,16 +2828,17 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
static void gsmld_write_task(struct work_struct *work)
{
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+ unsigned long flags;
int i, ret;
/* All outstanding control channel and control messages and one data
* frame is sent.
*/
ret = -ENODEV;
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
if (gsm->tty)
ret = gsm_data_kick(gsm);
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (ret >= 0)
for (i = 0; i < NUM_DLCI; i++)
@@ -3042,6 +3045,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
int space;
int ret;
@@ -3049,13 +3053,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
return -ENODEV;
ret = -ENOBUFS;
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
space = tty_write_room(tty);
if (space >= nr)
ret = tty->ops->write(tty, buf, nr);
else
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
return ret;
}
@@ -3352,13 +3356,14 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
+ unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
- mutex_lock(&gsm->tx_mutex);
+ spin_lock_irqsave(&gsm->tx_lock, flags);
gsm_dlci_modem_output(gsm, dlci, brk);
- mutex_unlock(&gsm->tx_mutex);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 44cc755b1a29..0e43bdfb7459 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -174,6 +174,8 @@ static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
*/
up->dma = dma;
+ lpss->dma_maxburst = 16;
+
port->set_termios = dw8250_do_set_termios;
return 0;
@@ -277,8 +279,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
struct dw_dma_slave *rx_param, *tx_param;
struct device *dev = port->port.dev;
- if (!lpss->dma_param.dma_dev)
+ if (!lpss->dma_param.dma_dev) {
+ dma = port->dma;
+ if (dma)
+ goto out_configuration_only;
+
return 0;
+ }
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
if (!rx_param)
@@ -289,16 +296,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
return -ENOMEM;
*rx_param = lpss->dma_param;
- dma->rxconf.src_maxburst = lpss->dma_maxburst;
-
*tx_param = lpss->dma_param;
- dma->txconf.dst_maxburst = lpss->dma_maxburst;
dma->fn = lpss8250_dma_filter;
dma->rx_param = rx_param;
dma->tx_param = tx_param;
port->dma = dma;
+
+out_configuration_only:
+ dma->rxconf.src_maxburst = lpss->dma_maxburst;
+ dma->txconf.dst_maxburst = lpss->dma_maxburst;
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 41b8c6b27136..3f33014022f0 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg)
return readl(up->port.membase + (reg << up->port.regshift));
}
-static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+/*
+ * Called on runtime PM resume path from omap8250_restore_regs(), and
+ * omap8250_set_mctrl().
+ */
+static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
@@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
}
}
+static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ int err;
+
+ err = pm_runtime_resume_and_get(port->dev);
+ if (err)
+ return;
+
+ __omap8250_set_mctrl(port, mctrl);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+}
+
/*
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
* The access to uart register after MDR1 Access
@@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
struct omap8250_priv *priv)
{
- u8 timeout = 255;
-
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
- /*
- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
- * TX_FIFO_E bit is 1.
- */
- while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
- (UART_LSR_THRE | UART_LSR_DR))) {
- timeout--;
- if (!timeout) {
- /* Should *never* happen. we warn and carry on */
- dev_crit(up->port.dev, "Errata i202: timedout %x\n",
- serial_in(up, UART_LSR));
- break;
- }
- udelay(1);
- }
}
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
@@ -292,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
{
struct omap8250_priv *priv = up->port.private_data;
struct uart_8250_dma *dma = up->dma;
+ u8 mcr = serial8250_in_MCR(up);
if (dma && dma->tx_running) {
/*
@@ -308,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial8250_out_MCR(up, UART_MCR_TCRTLR);
+ serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR);
serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv);
@@ -324,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */
- serial8250_out_MCR(up, up->mcr);
+ serial8250_out_MCR(up, mcr);
+
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -341,7 +344,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
omap8250_update_mdr1(up, priv);
- up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+ __omap8250_set_mctrl(&up->port, up->port.mctrl);
if (up->port.rs485.flags & SER_RS485_ENABLED)
serial8250_em485_stop_tx(up);
@@ -669,7 +672,6 @@ static int omap_8250_startup(struct uart_port *port)
pm_runtime_get_sync(port->dev);
- up->mcr = 0;
serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_LCR, UART_LCR_WLEN8);
@@ -1458,9 +1460,15 @@ err:
static int omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ int err;
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ return err;
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
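
The new omap8250_set_mctrl() wrapper and the remove() change follow one rule: resume the port with pm_runtime_resume_and_get() before touching registers and balance the reference afterwards, while __omap8250_set_mctrl() stays unwrapped because it is also called from the runtime-resume path itself, which must not try to resume the device again. A rough sketch of the wrapper pattern (hypothetical operation, not the omap code):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void __example_touch_registers(struct device *dev)
{
	/* Caller guarantees the device is powered up. */
}

static void example_touch_registers(struct device *dev)
{
	/* Returns 0 on success; on failure the usage count is already dropped. */
	if (pm_runtime_resume_and_get(dev))
		return;

	__example_touch_registers(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
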
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe8662cd9402..388172289627 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1897,10 +1897,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
- case UART_IIR_RX_TIMEOUT:
- serial8250_rx_dma_flush(up);
+ case UART_IIR_RDI:
+ if (!up->dma->rx_running)
+ break;
fallthrough;
case UART_IIR_RLSI:
+ case UART_IIR_RX_TIMEOUT:
+ serial8250_rx_dma_flush(up);
return true;
}
return up->dma->rx_dma(up);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 67fa113f77d4..888e01fbd9c5 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -12,6 +12,7 @@
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -404,33 +405,6 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
#define lpuart_enable_clks(x) __lpuart_enable_clks(x, true)
#define lpuart_disable_clks(x) __lpuart_enable_clks(x, false)
-static int lpuart_global_reset(struct lpuart_port *sport)
-{
- struct uart_port *port = &sport->port;
- void __iomem *global_addr;
- int ret;
-
- if (uart_console(port))
- return 0;
-
- ret = clk_prepare_enable(sport->ipg_clk);
- if (ret) {
- dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
- return ret;
- }
-
- if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
- global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF;
- writel(UART_GLOBAL_RST, global_addr);
- usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
- writel(0, global_addr);
- usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
- }
-
- clk_disable_unprepare(sport->ipg_clk);
- return 0;
-}
-
static void lpuart_stop_tx(struct uart_port *port)
{
unsigned char temp;
@@ -2636,6 +2610,54 @@ static const struct serial_rs485 lpuart_rs485_supported = {
/* delay_rts_* and RX_DURING_TX are not supported */
};
+static int lpuart_global_reset(struct lpuart_port *sport)
+{
+ struct uart_port *port = &sport->port;
+ void __iomem *global_addr;
+ unsigned long ctrl, bd;
+ unsigned int val = 0;
+ int ret;
+
+ ret = clk_prepare_enable(sport->ipg_clk);
+ if (ret) {
+ dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
+ /*
+ * If the transmitter is used by earlycon, wait for transmit engine to
+ * complete and then reset.
+ */
+ ctrl = lpuart32_read(port, UARTCTRL);
+ if (ctrl & UARTCTRL_TE) {
+ bd = lpuart32_read(&sport->port, UARTBAUD);
+ if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false,
+ port)) {
+ dev_warn(sport->port.dev,
+ "timeout waiting for transmit engine to complete\n");
+ clk_disable_unprepare(sport->ipg_clk);
+ return 0;
+ }
+ }
+
+ global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF;
+ writel(UART_GLOBAL_RST, global_addr);
+ usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
+ writel(0, global_addr);
+ usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
+
+ /* Recover the transmitter for earlycon. */
+ if (ctrl & UARTCTRL_TE) {
+ lpuart32_write(port, bd, UARTBAUD);
+ lpuart32_write(port, ctrl, UARTCTRL);
+ }
+ }
+
+ clk_disable_unprepare(sport->ipg_clk);
+ return 0;
+}
+
static int lpuart_probe(struct platform_device *pdev)
{
const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
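
lpuart_global_reset() is moved below the lpuart32 helpers it now uses, and before asserting the global reset it polls the transmitter so an earlycon message still in flight is not truncated; read_poll_timeout() from <linux/iopoll.h> supplies the poll-until-condition-or-timeout loop and returns -ETIMEDOUT on expiry. A minimal illustration of that helper with a hypothetical status read:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

static u32 example_read_status(void __iomem *base)
{
	return readl(base);
}

static int example_wait_tx_idle(void __iomem *base)
{
	u32 val;

	/*
	 * Poll example_read_status(base) every 1 us until bit 0 is set,
	 * giving up after 100 ms; returns 0 on success, -ETIMEDOUT otherwise.
	 */
	return read_poll_timeout(example_read_status, val, val & BIT(0),
				 1, 100000, false, base);
}
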
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 05b432dc7a85..aadda66405b4 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2594,6 +2594,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
.suspend_noirq = imx_uart_suspend_noirq,
.resume_noirq = imx_uart_resume_noirq,
.freeze_noirq = imx_uart_suspend_noirq,
+ .thaw_noirq = imx_uart_resume_noirq,
.restore_noirq = imx_uart_resume_noirq,
.suspend = imx_uart_suspend,
.resume = imx_uart_resume,
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index c67715f6f756..f9aa50ff14d4 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -600,11 +600,11 @@ int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
trace_cdnsp_ep_halt(value ? "Set" : "Clear");
- if (value) {
- ret = cdnsp_cmd_stop_ep(pdev, pep);
- if (ret)
- return ret;
+ ret = cdnsp_cmd_stop_ep(pdev, pep);
+ if (ret)
+ return ret;
+ if (value) {
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
cdnsp_queue_halt_endpoint(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
@@ -613,10 +613,6 @@ int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
pep->ep_state |= EP_HALTED;
} else {
- /*
- * In device mode driver can call reset endpoint command
- * from any endpoint state.
- */
cdnsp_queue_reset_ep(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 794e413800ae..2f29431f612e 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1763,10 +1763,15 @@ static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
int trb_buff_len,
unsigned int td_total_len,
struct cdnsp_request *preq,
- bool more_trbs_coming)
+ bool more_trbs_coming,
+ bool zlp)
{
u32 maxp, total_packet_count;
+ /* Before ZLP driver needs set TD_SIZE = 1. */
+ if (zlp)
+ return 1;
+
/* One TRB with a zero-length data packet. */
if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len)
@@ -1960,7 +1965,8 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
/* Set the TRB length, TD size, and interrupter fields. */
remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
full_len, preq,
- more_trbs_coming);
+ more_trbs_coming,
+ zero_len_trb);
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
@@ -2025,7 +2031,7 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
if (preq->request.length > 0) {
remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
- preq->request.length, preq, 1);
+ preq->request.length, preq, 1, 0);
length_field = TRB_LEN(preq->request.length) |
TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
@@ -2076,7 +2082,8 @@ int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
int ret = 0;
- if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
+ if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED ||
+ ep_state == EP_STATE_HALTED) {
trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
goto ep_stopped;
}
@@ -2225,7 +2232,7 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
/* Set the TRB length, TD size, & interrupter fields. */
remainder = cdnsp_td_remainder(pdev, running_total,
trb_buff_len, td_len, preq,
- more_trbs_coming);
+ more_trbs_coming, 0);
length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 9643b905e2d8..6164fc4c96a4 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -24,11 +24,37 @@
#define CFG_RXDET_P3_EN BIT(15)
#define LPM_2_STB_SWITCH_EN BIT(25)
-static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+static void xhci_cdns3_plat_start(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 value;
+
+ /* set usbcmd.EU3S */
+ value = readl(&xhci->op_regs->command);
+ value |= CMD_PM_INDEX;
+ writel(value, &xhci->op_regs->command);
+
+ if (hcd->regs) {
+ value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
+ value |= CFG_RXDET_P3_EN;
+ writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
+
+ value = readl(hcd->regs + XECP_PORT_CAP_REG);
+ value |= LPM_2_STB_SWITCH_EN;
+ writel(value, hcd->regs + XECP_PORT_CAP_REG);
+ }
+}
+
+static int xhci_cdns3_resume_quirk(struct usb_hcd *hcd)
+{
+ xhci_cdns3_plat_start(hcd);
+ return 0;
+}
static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
- .suspend_quirk = xhci_cdns3_suspend_quirk,
+ .plat_start = xhci_cdns3_plat_start,
+ .resume_quirk = xhci_cdns3_resume_quirk,
};
static int __cdns_host_init(struct cdns *cdns)
@@ -90,32 +116,6 @@ err1:
return ret;
}
-static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- u32 value;
-
- if (pm_runtime_status_suspended(hcd->self.controller))
- return 0;
-
- /* set usbcmd.EU3S */
- value = readl(&xhci->op_regs->command);
- value |= CMD_PM_INDEX;
- writel(value, &xhci->op_regs->command);
-
- if (hcd->regs) {
- value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
- value |= CFG_RXDET_P3_EN;
- writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
-
- value = readl(hcd->regs + XECP_PORT_CAP_REG);
- value |= LPM_2_STB_SWITCH_EN;
- writel(value, hcd->regs + XECP_PORT_CAP_REG);
- }
-
- return 0;
-}
-
static void cdns_host_exit(struct cdns *cdns)
{
kfree(cdns->xhci_plat_data);
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index ada78daba6df..c17516c29b63 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
ci->enabled_otg_timer_bits &= ~(1 << t);
if (ci->next_otg_timer == t) {
if (ci->enabled_otg_timer_bits == 0) {
+ spin_unlock_irqrestore(&ci->lock, flags);
/* No enabled timers after delete it */
hrtimer_cancel(&ci->otg_fsm_hrtimer);
+ spin_lock_irqsave(&ci->lock, flags);
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
} else {
/* Find the next timer */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0722d2131305..079e183cf3bf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ /* Realforce 87U Keyboard */
+ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c0e7c76dc5c8..1f348bc867c2 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1711,6 +1711,16 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
return extcon_get_extcon_dev(name);
/*
+ * Check explicitly if "usb-role-switch" is used since
+ * extcon_find_edev_by_node() can not be used to check the absence of
+ * an extcon device. In the absence of an device it will always return
+ * EPROBE_DEFER.
+ */
+ if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
+ device_property_read_bool(dev, "usb-role-switch"))
+ return NULL;
+
+ /*
* Try to get an extcon device from the USB PHY controller's "port"
* node. Check if it has the "port" node first, to avoid printing the
* error message from underlying code, as it's a valid case: extcon
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 0ecf20eeceee..4be6a873bd07 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -37,15 +37,6 @@ struct dwc3_exynos {
struct regulator *vdd10;
};
-static int dwc3_exynos_remove_child(struct device *dev, void *unused)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int dwc3_exynos_probe(struct platform_device *pdev)
{
struct dwc3_exynos *exynos;
@@ -142,7 +133,7 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
int i;
- device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
+ of_platform_depopulate(&pdev->dev);
for (i = exynos->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(exynos->clks[i]);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5fe2d136dff5..6d524fa76443 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -291,7 +291,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
*
* DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
- if (dwc->gadget->speed <= USB_SPEED_HIGH) {
+ if (dwc->gadget->speed <= USB_SPEED_HIGH ||
+ DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -1023,13 +1024,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
reg &= ~DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
- /* Clear out the ep descriptors for non-ep0 */
- if (dep->number > 1) {
- dep->endpoint.comp_desc = NULL;
- dep->endpoint.desc = NULL;
- }
-
- dwc3_remove_requests(dwc, dep, -ECONNRESET);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
dep->stream_capable = false;
dep->type = 0;
@@ -1043,6 +1038,12 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
dep->flags &= mask;
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+ dep->endpoint.desc = NULL;
+ }
+
return 0;
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index a7154fe8206d..f6f13e7f1ba1 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,13 +11,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#include "../host/xhci-plat.h"
#include "core.h"
-static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
- .quirks = XHCI_SKIP_PHY_INIT,
-};
-
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -97,11 +92,6 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
- ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
- sizeof(dwc3_xhci_plat_priv));
- if (ret)
- goto err;
-
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index c4ed48d6b8a4..a189b08bba80 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -199,16 +199,6 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
* V4L2 ioctls
*/
-struct uvc_format {
- u8 bpp;
- u32 fcc;
-};
-
-static struct uvc_format uvc_formats[] = {
- { 16, V4L2_PIX_FMT_YUYV },
- { 0, V4L2_PIX_FMT_MJPEG },
-};
-
static int
uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
@@ -243,47 +233,6 @@ uvc_v4l2_get_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
static int
-uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct video_device *vdev = video_devdata(file);
- struct uvc_device *uvc = video_get_drvdata(vdev);
- struct uvc_video *video = &uvc->video;
- struct uvc_format *format;
- unsigned int imagesize;
- unsigned int bpl;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) {
- format = &uvc_formats[i];
- if (format->fcc == fmt->fmt.pix.pixelformat)
- break;
- }
-
- if (i == ARRAY_SIZE(uvc_formats)) {
- uvcg_info(&uvc->func, "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
- return -EINVAL;
- }
-
- bpl = format->bpp * fmt->fmt.pix.width / 8;
- imagesize = bpl ? bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage;
-
- video->fcc = format->fcc;
- video->bpp = format->bpp;
- video->width = fmt->fmt.pix.width;
- video->height = fmt->fmt.pix.height;
- video->imagesize = imagesize;
-
- fmt->fmt.pix.field = V4L2_FIELD_NONE;
- fmt->fmt.pix.bytesperline = bpl;
- fmt->fmt.pix.sizeimage = imagesize;
- fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- fmt->fmt.pix.priv = 0;
-
- return 0;
-}
-
-static int
uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
@@ -324,6 +273,27 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
static int
+uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_video *video = &uvc->video;
+ int ret;
+
+ ret = uvc_v4l2_try_format(file, fh, fmt);
+ if (ret)
+ return ret;
+
+ video->fcc = fmt->fmt.pix.pixelformat;
+ video->bpp = fmt->fmt.pix.bytesperline * 8 / video->width;
+ video->width = fmt->fmt.pix.width;
+ video->height = fmt->fmt.pix.height;
+ video->imagesize = fmt->fmt.pix.sizeimage;
+
+ return ret;
+}
+
+static int
uvc_v4l2_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *fival)
{
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2df52f75f6b3..7558cc4d90cc 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
{
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
- if (IS_ERR_OR_NULL(usb_dev->gpio_desc))
+ if (!usb_dev->gpio_desc)
return;
gpiod_set_value(usb_dev->gpio_desc, val);
@@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
- usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
- GPIOD_OUT_HIGH);
+ usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc),
+ "error obtaining VCC GPIO");
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 697683e3fbff..c3b7f1d98e78 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+#define UBLOX_VENDOR_ID 0x1546
+
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_H01 0x0800
@@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
-#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -581,6 +582,9 @@ static void option_instat_callback(struct urb *urb);
#define OPPO_VENDOR_ID 0x22d9
#define OPPO_PRODUCT_R11 0x276c
+/* Sierra Wireless products */
+#define SIERRA_VENDOR_ID 0x1199
+#define SIERRA_PRODUCT_EM9191 0x90d3
/* Device flags */
@@ -1124,8 +1128,16 @@ static const struct usb_device_id option_ids[] = {
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
- { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
.driver_info = RSVD(3) },
+ /* u-blox products */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
+ .driver_info = RSVD(4) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
@@ -2167,6 +2179,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
@@ -2176,6 +2189,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
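The new u-blox LARA-L6/LARA-R6 and Sierra Wireless EM9191 entries above flag certain interfaces with RSVD(n). A small sketch of how a reserved-interface bitmap of that kind can be consulted before binding an interface follows; the table contents and the assumption that driver_info carries a bitmask of interface numbers to skip are illustrative, not the driver's exact encoding.

#include <stdint.h>
#include <stdio.h>

#define RSVD(ifnum) (1u << (ifnum))   /* bit n set => do not bind interface n */

struct id_entry {
    uint16_t vid, pid;
    uint32_t reserved_ifaces;         /* illustrative stand-in for driver_info */
};

static const struct id_entry ids[] = {
    { 0x1546, 0x1341, 0 },            /* u-blox LARA-L6: no reserved interfaces */
    { 0x1546, 0x1342, RSVD(4) },      /* u-blox LARA-L6 (RMNET): skip interface 4 */
};

static int should_bind(const struct id_entry *e, unsigned int ifnum)
{
    return !(e->reserved_ifaces & RSVD(ifnum));
}

int main(void)
{
    for (unsigned int ifnum = 0; ifnum < 6; ifnum++)
        printf("0x%04x:0x%04x interface %u -> %s\n",
               (unsigned int)ids[1].vid, (unsigned int)ids[1].pid, ifnum,
               should_bind(&ids[1], ifnum) ? "bind" : "skip (reserved)");
    return 0;
}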
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index e1f4df7238bf..fdbf3694e21f 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -369,13 +369,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
return pmc_usb_command(port, (void *)&req, sizeof(req));
}
-static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
+static int pmc_usb_mux_safe_state(struct pmc_usb_port *port,
+ struct typec_mux_state *state)
{
u8 msg;
if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
return 0;
+ if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) &&
+ state->alt && state->alt->svid == USB_TYPEC_DP_SID)
+ return 0;
+
+ if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) &&
+ state->alt && state->alt->svid == USB_TYPEC_TBT_SID)
+ return 0;
+
msg = PMC_USB_SAFE_MODE;
msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
@@ -443,7 +454,7 @@ pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
return 0;
if (state->mode == TYPEC_STATE_SAFE)
- return pmc_usb_mux_safe_state(port);
+ return pmc_usb_mux_safe_state(port, state);
if (state->mode == TYPEC_STATE_USB)
return pmc_usb_connect(port, port->role);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index b637e8b378b3..2a77bab948f5 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -474,7 +474,7 @@ static void tps6598x_handle_plug_event(struct tps6598x *tps, u32 status)
static irqreturn_t cd321x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
- u64 event;
+ u64 event = 0;
u32 status;
int ret;
@@ -519,8 +519,8 @@ err_unlock:
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
- u64 event1;
- u64 event2;
+ u64 event1 = 0;
+ u64 event2 = 0;
u32 status;
int ret;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index badc9d828cac..e030c2120183 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -2488,12 +2488,12 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
struct vfio_pci_core_device *cur;
bool needs_reset = false;
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
- /* No VFIO device in the set can have an open device FD */
- if (cur->vdev.open_count)
- return false;
+ /* No other VFIO device in the set can be open. */
+ if (vfio_device_set_open_count(dev_set) > 1)
+ return false;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
needs_reset |= cur->needs_reset;
- }
return needs_reset;
}
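The reset logic above now asks "is any other device in the set open?" via a helper that sums per-device open counts instead of bailing out inside the needs-reset walk. A small userspace sketch of that rule, assuming a plain array in place of the kernel's dev_set list:

#include <stdbool.h>
#include <stdio.h>

struct dev { unsigned int open_count; bool needs_reset; };

static unsigned int set_open_count(const struct dev *devs, unsigned int n)
{
    unsigned int count = 0;

    for (unsigned int i = 0; i < n; i++)
        count += devs[i].open_count;
    return count;
}

/* The set may be reset only when at most one device is open (the caller),
 * and at least one member actually flagged that it needs a reset. */
static bool set_needs_reset(const struct dev *devs, unsigned int n)
{
    bool needs_reset = false;

    if (set_open_count(devs, n) > 1)
        return false;

    for (unsigned int i = 0; i < n; i++)
        needs_reset |= devs[i].needs_reset;
    return needs_reset;
}

int main(void)
{
    struct dev set[] = { { 1, true }, { 0, false }, { 0, false } };

    printf("needs reset: %s\n", set_needs_reset(set, 3) ? "yes" : "no");
    set[1].open_count = 1;   /* a second open device blocks the reset */
    printf("needs reset: %s\n", set_needs_reset(set, 3) ? "yes" : "no");
    return 0;
}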
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 2d168793d4e1..6e8804fe0095 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -125,6 +125,19 @@ static void vfio_release_device_set(struct vfio_device *device)
xa_unlock(&vfio_device_set_xa);
}
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
+{
+ struct vfio_device *cur;
+ unsigned int open_count = 0;
+
+ lockdep_assert_held(&dev_set->lock);
+
+ list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+ open_count += cur->open_count;
+ return open_count;
+}
+EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
+
/*
* Group objects - create, release, get, put, search
*/
@@ -801,8 +814,9 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
mutex_lock(&device->group->group_lock);
- if (device->open_count == 1 && device->ops->close_device) {
- device->ops->close_device(device);
+ if (device->open_count == 1) {
+ if (device->ops->close_device)
+ device->ops->close_device(device);
vfio_device_container_unregister(device);
}
@@ -1017,10 +1031,12 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
mutex_lock(&device->group->group_lock);
- if (device->open_count == 1 && device->ops->close_device)
- device->ops->close_device(device);
+ if (device->open_count == 1) {
+ if (device->ops->close_device)
+ device->ops->close_device(device);
- vfio_device_container_unregister(device);
+ vfio_device_container_unregister(device);
+ }
mutex_unlock(&device->group->group_lock);
device->open_count--;
if (device->open_count == 0)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 098b62f7b701..c0143d38df83 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -577,7 +577,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (scr_readw(r) != vc->vc_video_erase_char)
break;
if (r != q && new_rows >= rows + logo_lines) {
- save = kmalloc(array3_size(logo_lines, new_cols, 2),
+ save = kzalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
int i = min(cols, new_cols);
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index f422f9c58ba7..1ea6d2e5b218 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -67,8 +67,27 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
return true;
}
+/*
+ * If an error is received from the host or AMD Secure Processor (ASP) there
+ * are two options. Either retry the exact same encrypted request or discontinue
+ * using the VMPCK.
+ *
+ * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
+ * cannot tolerate IV reuse.
+ *
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
+ * number.
+ *
+ * So if the sequence number were to be reused the encryption scheme is
+ * vulnerable. If the sequence number were incremented for a fresh IV the ASP
+ * will reject the request.
+ */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
+ dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
+ vmpck_id);
memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
snp_dev->vmpck = NULL;
}
@@ -321,34 +340,71 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
if (rc)
return rc;
- /* Call firmware to process the request */
+ /*
+ * Call firmware to process the request. In this function the encrypted
+ * message enters shared memory with the host. So after this call the
+ * sequence number must be incremented or the VMPCK must be deleted to
+ * prevent reuse of the IV.
+ */
rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+ /*
+ * If the extended guest request fails due to having too small of a
+ * certificate data buffer, retry the same guest request without the
+ * extended data request in order to increment the sequence number
+ * and thus avoid IV reuse.
+ */
+ if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+ err == SNP_GUEST_REQ_INVALID_LEN) {
+ const unsigned int certs_npages = snp_dev->input.data_npages;
+
+ exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+ /*
+ * If this call to the firmware succeeds, the sequence number can
+ * be incremented allowing for continued use of the VMPCK. If
+ * there is an error reflected in the return value, this value
+ * is checked further down and the result will be the deletion
+ * of the VMPCK and the error code being propagated back to the
+ * user as an ioctl() return code.
+ */
+ rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+ /*
+ * Override the error to inform callers the given extended
+ * request buffer size was too small and give the caller the
+ * required buffer size.
+ */
+ err = SNP_GUEST_REQ_INVALID_LEN;
+ snp_dev->input.data_npages = certs_npages;
+ }
+
if (fw_err)
*fw_err = err;
- if (rc)
- return rc;
+ if (rc) {
+ dev_alert(snp_dev->dev,
+ "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+ rc, *fw_err);
+ goto disable_vmpck;
+ }
- /*
- * The verify_and_dec_payload() will fail only if the hypervisor is
- * actively modifying the message header or corrupting the encrypted payload.
- * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that
- * the key cannot be used for any communication. The key is disabled to ensure
- * that AES-GCM does not use the same IV while encrypting the request payload.
- */
rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
if (rc) {
dev_alert(snp_dev->dev,
- "Detected unexpected decode failure, disabling the vmpck_id %d\n",
- vmpck_id);
- snp_disable_vmpck(snp_dev);
- return rc;
+ "Detected unexpected decode failure from ASP. rc: %d\n",
+ rc);
+ goto disable_vmpck;
}
/* Increment to new message sequence after payload decryption was successful. */
snp_inc_msg_seqno(snp_dev);
return 0;
+
+disable_vmpck:
+ snp_disable_vmpck(snp_dev);
+ return rc;
}
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
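The comments added above spell out the AES-GCM constraint: the message sequence number doubles as the IV, so after a failed exchange the guest may only resend the identical ciphertext or stop using the VMPCK; encrypting anything new under the same sequence number would reuse the IV. The following is a minimal state-machine sketch of that rule only. It is illustrative, not the guest driver's API, and it models just the "retire the key" branch; the patch additionally retries the same request in the SNP_GUEST_REQ_INVALID_LEN case.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct channel {
    unsigned long long seqno;   /* doubles as the AES-GCM IV */
    unsigned char key[32];      /* stand-in for the VMPCK */
    bool key_valid;
};

/* Simulated firmware exchange; 'ok' stands for the host/ASP result. */
static int exchange(struct channel *c, bool ok)
{
    if (!c->key_valid)
        return -1;                      /* key already retired */
    if (ok) {
        c->seqno += 2;                  /* request + response each consume one IV */
        return 0;
    }
    /*
     * Failure after the ciphertext was handed to the host: nothing new may
     * be encrypted under this sequence number, so retire the key.
     */
    memset(c->key, 0, sizeof(c->key));
    c->key_valid = false;
    return -1;
}

int main(void)
{
    struct channel c = { .seqno = 1, .key_valid = true };

    printf("ok exchange:   rc=%d seqno=%llu\n", exchange(&c, true), c.seqno);
    printf("failed:        rc=%d key_valid=%d\n", exchange(&c, false), c.key_valid);
    printf("after failure: rc=%d (key retired)\n", exchange(&c, true));
    return 0;
}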
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 47aa3a1ccaf5..fd3a644b0855 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
err = device_register(dev);
if (err) {
- pcpu_release(dev);
+ put_device(dev);
return err;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 18f0ed8b1f93..cd07e3fed0fa 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -54,7 +54,8 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
pin = pdev->pin;
/* We don't know the GSI. Specify the PCI INTx line instead. */
- return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+ return ((uint64_t)HVM_PARAM_CALLBACK_TYPE_PCI_INTX <<
+ HVM_CALLBACK_VIA_TYPE_SHIFT) |
((uint64_t)pci_domain_nr(pdev->bus) << 32) |
((uint64_t)pdev->bus->number << 16) |
((uint64_t)(pdev->devfn & 0xff) << 8) |
@@ -144,7 +145,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_warn(&pdev->dev, "Unable to set the evtchn callback "
"err=%d\n", ret);
- goto out;
+ goto irq_out;
}
}
@@ -152,13 +153,16 @@ static int platform_pci_probe(struct pci_dev *pdev,
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
if (ret)
- goto out;
+ goto irq_out;
ret = gnttab_init();
if (ret)
goto grant_out;
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
+irq_out:
+ if (!xen_have_vector_callback)
+ free_irq(pdev->irq, pdev);
out:
pci_release_region(pdev, 0);
mem_out:
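The platform-pci change replaces the literal 0x01 with HVM_PARAM_CALLBACK_TYPE_PCI_INTX when building the "callback via" value. Below is a standalone sketch of that bit packing; the type value of 1 and the shift of 56 are assumed from the Xen public headers, and the sample domain/bus/devfn values are made up.

#include <stdint.h>
#include <stdio.h>

/* Values assumed from the Xen public headers, for illustration only. */
#define HVM_CALLBACK_VIA_TYPE_SHIFT      56
#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1

static uint64_t callback_via_pci_intx(uint16_t domain, uint8_t bus, uint8_t devfn)
{
    /* the low byte additionally encodes the INTx pin; omitted in this sketch */
    return ((uint64_t)HVM_PARAM_CALLBACK_TYPE_PCI_INTX <<
                      HVM_CALLBACK_VIA_TYPE_SHIFT) |
           ((uint64_t)domain << 32) |
           ((uint64_t)bus << 16) |
           ((uint64_t)(devfn & 0xff) << 8);
}

int main(void)
{
    /* example: domain 0000, bus 00, device 03 function 0 */
    printf("via = 0x%016llx\n",
           (unsigned long long)callback_via_pci_intx(0, 0, 3 << 3));
    return 0;
}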
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 5e53b4817f16..097316a74126 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = {
};
static struct msi_msix_field_config {
- u16 enable_bit; /* bit for enabling MSI/MSI-X */
- unsigned int int_type; /* interrupt type for exclusiveness check */
+ u16 enable_bit; /* bit for enabling MSI/MSI-X */
+ u16 allowed_bits; /* bits allowed to be changed */
+ unsigned int int_type; /* interrupt type for exclusiveness check */
} msi_field_config = {
.enable_bit = PCI_MSI_FLAGS_ENABLE,
+ .allowed_bits = PCI_MSI_FLAGS_ENABLE,
.int_type = INTERRUPT_TYPE_MSI,
}, msix_field_config = {
.enable_bit = PCI_MSIX_FLAGS_ENABLE,
+ .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL,
.int_type = INTERRUPT_TYPE_MSIX,
};
@@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
return 0;
if (!dev_data->allow_interrupt_control ||
- (new_value ^ old_value) & ~field_config->enable_bit)
+ (new_value ^ old_value) & ~field_config->allowed_bits)
return PCIBIOS_SET_FAILED;
if (new_value & field_config->enable_bit) {
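The xen-pciback fix widens the set of message-control bits a guest may toggle from just the enable bit to an allowed_bits mask, so MSI-X guests can also flip the function-mask (MASKALL) bit. A small sketch of the (new ^ old) & ~allowed filter follows; the register bit values match the usual PCI definitions but should be treated as illustrative here.

#include <stdint.h>
#include <stdio.h>

#define PCI_MSIX_FLAGS_ENABLE  0x8000
#define PCI_MSIX_FLAGS_MASKALL 0x4000

/* Reject the write if any bit outside the allowed mask would change. */
static int flags_write_ok(uint16_t old_value, uint16_t new_value,
                          uint16_t allowed_bits)
{
    return !((new_value ^ old_value) & ~allowed_bits);
}

int main(void)
{
    uint16_t old = PCI_MSIX_FLAGS_ENABLE;
    uint16_t allowed = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;

    /* toggling MASKALL is now permitted ... */
    printf("set MASKALL:   %s\n",
           flags_write_ok(old, old | PCI_MSIX_FLAGS_MASKALL, allowed) ? "ok" : "rejected");
    /* ... while touching any other bit of the control word is still not */
    printf("set other bit: %s\n",
           flags_write_ok(old, old | 0x0001, allowed) ? "ok" : "rejected");
    return 0;
}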
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index c0031a3ab42f..3ac5fcf98d0d 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -167,8 +167,8 @@ responded:
clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
- rtt_us < server->probe.rtt) {
+ rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
+ if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
server->rtt = rtt_us;
alist->preferred = index;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 4981baf97835..b5237206eac3 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -406,7 +406,7 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
if (!server)
return;
- a = atomic_inc_return(&server->active);
+ a = atomic_read(&server->active);
zero = __refcount_dec_and_test(&server->ref, &r);
trace_afs_server(debug_id, r - 1, a, reason);
if (unlikely(zero))
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a9543f01184c..dcb510f38dda 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -4663,7 +4663,12 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
int ret;
int i;
- ASSERT(!path->nowait);
+ /*
+ * The nowait semantics are used only for write paths, where we don't
+ * use the tree mod log and sequence numbers.
+ */
+ if (time_seq)
+ ASSERT(!path->nowait);
nritems = btrfs_header_nritems(path->nodes[0]);
if (nritems == 0)
@@ -4683,7 +4688,14 @@ again:
if (path->need_commit_sem) {
path->need_commit_sem = 0;
need_commit_sem = true;
- down_read(&fs_info->commit_root_sem);
+ if (path->nowait) {
+ if (!down_read_trylock(&fs_info->commit_root_sem)) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ down_read(&fs_info->commit_root_sem);
+ }
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
}
@@ -4759,7 +4771,7 @@ again:
next = c;
ret = read_block_for_search(root, path, &next, level,
slot, &key);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN && !path->nowait)
goto again;
if (ret < 0) {
@@ -4769,6 +4781,10 @@ again:
if (!path->skip_locking) {
ret = btrfs_try_tree_read_lock(next);
+ if (!ret && path->nowait) {
+ ret = -EAGAIN;
+ goto done;
+ }
if (!ret && time_seq) {
/*
* If we don't get the lock, we may be racing
@@ -4799,7 +4815,7 @@ again:
ret = read_block_for_search(root, path, &next, level,
0, &key);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN && !path->nowait)
goto again;
if (ret < 0) {
@@ -4807,8 +4823,16 @@ again:
goto done;
}
- if (!path->skip_locking)
- btrfs_tree_read_lock(next);
+ if (!path->skip_locking) {
+ if (path->nowait) {
+ if (!btrfs_try_tree_read_lock(next)) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ btrfs_tree_read_lock(next);
+ }
+ }
}
ret = 0;
done:
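The btrfs_next_old_leaf() changes follow one pattern: when the path is marked nowait, every blocking lock acquisition becomes a try-lock that bails out with -EAGAIN. A small pthread sketch of the same pattern (the trylock failure is translated to -EAGAIN, mirroring how a nowait caller is expected to back off and retry later):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Take the lock for reading; in nowait mode never block, report -EAGAIN. */
static int read_lock(bool nowait)
{
    if (nowait) {
        if (pthread_rwlock_tryrdlock(&lock) != 0)
            return -EAGAIN;
        return 0;
    }
    pthread_rwlock_rdlock(&lock);
    return 0;
}

int main(void)
{
    /* Hold a write lock so the nowait reader must back off. */
    pthread_rwlock_wrlock(&lock);
    printf("nowait attempt: %d (expect -EAGAIN = %d)\n", read_lock(true), -EAGAIN);
    pthread_rwlock_unlock(&lock);

    printf("blocking attempt: %d\n", read_lock(false));
    pthread_rwlock_unlock(&lock);
    return 0;
}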
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d5dd8bed1488..5ba2e810dc6e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3105,6 +3105,8 @@ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
}
}
+ btrfs_free_path(path);
+ path = NULL;
if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
ret = -EFAULT;
@@ -3194,6 +3196,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
}
out:
+ btrfs_free_path(path);
+
if (!ret || ret == -EOVERFLOW) {
rootrefs->num_items = found;
/* update min_treeid for next search */
@@ -3205,7 +3209,6 @@ out:
}
kfree(rootrefs);
- btrfs_free_path(path);
return ret;
}
@@ -4231,6 +4234,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
ipath->fspath->val[i] = rel_ptr;
}
+ btrfs_free_path(path);
+ path = NULL;
ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
ipath->fspath, size);
if (ret) {
@@ -4281,21 +4286,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
size = min_t(u32, loi->size, SZ_16M);
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
- inodes = NULL;
- goto out;
+ goto out_loi;
}
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
inodes, ignore_offset);
+ btrfs_free_path(path);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
@@ -4307,7 +4311,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
ret = -EFAULT;
out:
- btrfs_free_path(path);
kvfree(inodes);
out_loi:
kfree(loi);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9334c3157c22..b74105a10f16 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2951,14 +2951,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
dstgroup->rsv_excl = inherit->lim.rsv_excl;
- ret = update_qgroup_limit_item(trans, dstgroup);
- if (ret) {
- qgroup_mark_inconsistent(fs_info);
- btrfs_info(fs_info,
- "unable to update quota limit for %llu",
- dstgroup->qgroupid);
- goto unlock;
- }
+ qgroup_dirty(fs_info, dstgroup);
}
if (srcid) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 145c84b44fd0..1c4b693ee4a3 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5702,6 +5702,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
u64 ext_len;
u64 clone_len;
u64 clone_data_offset;
+ bool crossed_src_i_size = false;
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(clone_root->root, path);
@@ -5759,8 +5760,10 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
if (key.offset >= clone_src_i_size)
break;
- if (key.offset + ext_len > clone_src_i_size)
+ if (key.offset + ext_len > clone_src_i_size) {
ext_len = clone_src_i_size - key.offset;
+ crossed_src_i_size = true;
+ }
clone_data_offset = btrfs_file_extent_offset(leaf, ei);
if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
@@ -5821,6 +5824,25 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
ret = send_clone(sctx, offset, clone_len,
clone_root);
}
+ } else if (crossed_src_i_size && clone_len < len) {
+ /*
+ * If we are at i_size of the clone source inode and we
+ * cannot clone from it, terminate the loop. This is
+ * to avoid sending two write operations, one with a
+ * length matching clone_len and the final one after
+ * this loop with a length of len - clone_len.
+ *
+ * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
+ * was passed to the send ioctl), this helps avoid
+ * sending an encoded write for an offset that is not
+ * sector size aligned, in case the i_size of the source
+ * inode is not sector size aligned. That will make the
+ * receiver fallback to decompression of the data and
+ * writing it using regular buffered IO, therefore while
+ * not incorrect, it's not optimal due to decompression and
+ * possible re-compression at the receiver.
+ */
+ break;
} else {
ret = send_extent_data(sctx, dst_path, offset,
clone_len);
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 699b54b3acaa..74fef1f49c35 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -2321,8 +2321,11 @@ int __init btrfs_init_sysfs(void)
#ifdef CONFIG_BTRFS_DEBUG
ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
- if (ret)
- goto out2;
+ if (ret) {
+ sysfs_unmerge_group(&btrfs_kset->kobj,
+ &btrfs_static_feature_attr_group);
+ goto out_remove_group;
+ }
#endif
return 0;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 813986e38258..c3cf3dabe0b1 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3694,15 +3694,29 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
u64 *last_old_dentry_offset)
{
struct btrfs_root *log = inode->root->log_root;
- struct extent_buffer *src = path->nodes[0];
- const int nritems = btrfs_header_nritems(src);
+ struct extent_buffer *src;
+ const int nritems = btrfs_header_nritems(path->nodes[0]);
const u64 ino = btrfs_ino(inode);
bool last_found = false;
int batch_start = 0;
int batch_size = 0;
int i;
- for (i = path->slots[0]; i < nritems; i++) {
+ /*
+ * We need to clone the leaf, release the read lock on it, and use the
+ * clone before modifying the log tree. See the comment at copy_items()
+ * about why we need to do this.
+ */
+ src = btrfs_clone_extent_buffer(path->nodes[0]);
+ if (!src)
+ return -ENOMEM;
+
+ i = path->slots[0];
+ btrfs_release_path(path);
+ path->nodes[0] = src;
+ path->slots[0] = i;
+
+ for (; i < nritems; i++) {
struct btrfs_dir_item *di;
struct btrfs_key key;
int ret;
@@ -4303,7 +4317,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
{
struct btrfs_root *log = inode->root->log_root;
struct btrfs_file_extent_item *extent;
- struct extent_buffer *src = src_path->nodes[0];
+ struct extent_buffer *src;
int ret = 0;
struct btrfs_key *ins_keys;
u32 *ins_sizes;
@@ -4314,6 +4328,43 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
const u64 i_size = i_size_read(&inode->vfs_inode);
+ /*
+ * To keep lockdep happy and avoid deadlocks, clone the source leaf and
+ * use the clone. This is because otherwise we would be changing the log
+ * tree, to insert items from the subvolume tree or insert csum items,
+ * while holding a read lock on a leaf from the subvolume tree, which
+ * creates a nasty lock dependency when COWing log tree nodes/leaves:
+ *
+ * 1) Modifying the log tree triggers an extent buffer allocation while
+ * holding a write lock on a parent extent buffer from the log tree.
+ * Allocating the pages for an extent buffer, or the extent buffer
+ * struct, can trigger inode eviction and finally the inode eviction
+ * will trigger a release/remove of a delayed node, which requires
+ * taking the delayed node's mutex;
+ *
+ * 2) Allocating a metadata extent for a log tree can trigger the async
+ * reclaim thread and make us wait for it to release enough space and
+ * unblock our reservation ticket. The reclaim thread can start
+ * flushing delayed items, and that in turn results in the need to
+ * lock delayed node mutexes and in the need to write lock extent
+ * buffers of a subvolume tree - all this while holding a write lock
+ * on the parent extent buffer in the log tree.
+ *
+ * So one task in scenario 1) running in parallel with another task in
+ * scenario 2) could lead to a deadlock, one wanting to lock a delayed
+ * node mutex while having a read lock on a leaf from the subvolume,
+ * while the other is holding the delayed node's mutex and wants to
+ * write lock the same subvolume leaf for flushing delayed items.
+ */
+ src = btrfs_clone_extent_buffer(src_path->nodes[0]);
+ if (!src)
+ return -ENOMEM;
+
+ i = src_path->slots[0];
+ btrfs_release_path(src_path);
+ src_path->nodes[0] = src;
+ src_path->slots[0] = i;
+
ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
nr * sizeof(u32), GFP_NOFS);
if (!ins_data)
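Both process_dir_items_leaf() and copy_items() now clone the source leaf and drop the read lock before touching the log tree, so no subvolume-leaf lock is held while log-tree work may block. A generic sketch of that snapshot-then-unlock pattern using a pthread mutex; the names and the data being copied are purely illustrative.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t leaf_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_leaf[64] = "shared leaf contents";

static void process_snapshot(const char *snap)
{
    /* May block or take other locks; the shared leaf is no longer locked. */
    printf("processing private copy: %s\n", snap);
}

int main(void)
{
    char snapshot[sizeof(shared_leaf)];

    /* Copy under the lock, then release it before doing the heavy work. */
    pthread_mutex_lock(&leaf_lock);
    memcpy(snapshot, shared_leaf, sizeof(snapshot));
    pthread_mutex_unlock(&leaf_lock);

    process_snapshot(snapshot);
    return 0;
}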
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 1912abf6d020..c9e2b0c85309 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -134,7 +134,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
super[i] = page_address(page[i]);
}
- if (super[0]->generation > super[1]->generation)
+ if (btrfs_super_generation(super[0]) >
+ btrfs_super_generation(super[1]))
sector = zones[1].start;
else
sector = zones[0].start;
@@ -466,7 +467,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
goto out;
}
- zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
+ zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
if (!zones) {
ret = -ENOMEM;
goto out;
@@ -585,7 +586,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
}
- kfree(zones);
+ kvfree(zones);
switch (bdev_zoned_model(bdev)) {
case BLK_ZONED_HM:
@@ -617,7 +618,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
return 0;
out:
- kfree(zones);
+ kvfree(zones);
out_free_zone_info:
btrfs_destroy_dev_zone_info(device);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index fb023f9fafcb..e54814d0c2f7 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2248,7 +2248,6 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req1 = NULL, *req2 = NULL;
- unsigned int max_sessions;
int ret, err = 0;
spin_lock(&ci->i_unsafe_lock);
@@ -2267,27 +2266,23 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
spin_unlock(&ci->i_unsafe_lock);
/*
- * The mdsc->max_sessions is unlikely to be changed
- * mostly, here we will retry it by reallocating the
- * sessions array memory to get rid of the mdsc->mutex
- * lock.
- */
-retry:
- max_sessions = mdsc->max_sessions;
-
- /*
* Trigger to flush the journal logs in all the relevant MDSes
* manually, or in the worst case we must wait at most 5 seconds
* for the journal logs to be flushed by the MDSes periodically.
*/
- if ((req1 || req2) && likely(max_sessions)) {
- struct ceph_mds_session **sessions = NULL;
- struct ceph_mds_session *s;
+ if (req1 || req2) {
struct ceph_mds_request *req;
+ struct ceph_mds_session **sessions;
+ struct ceph_mds_session *s;
+ unsigned int max_sessions;
int i;
+ mutex_lock(&mdsc->mutex);
+ max_sessions = mdsc->max_sessions;
+
sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL);
if (!sessions) {
+ mutex_unlock(&mdsc->mutex);
err = -ENOMEM;
goto out;
}
@@ -2299,16 +2294,6 @@ retry:
s = req->r_session;
if (!s)
continue;
- if (unlikely(s->s_mds >= max_sessions)) {
- spin_unlock(&ci->i_unsafe_lock);
- for (i = 0; i < max_sessions; i++) {
- s = sessions[i];
- if (s)
- ceph_put_mds_session(s);
- }
- kfree(sessions);
- goto retry;
- }
if (!sessions[s->s_mds]) {
s = ceph_get_mds_session(s);
sessions[s->s_mds] = s;
@@ -2321,16 +2306,6 @@ retry:
s = req->r_session;
if (!s)
continue;
- if (unlikely(s->s_mds >= max_sessions)) {
- spin_unlock(&ci->i_unsafe_lock);
- for (i = 0; i < max_sessions; i++) {
- s = sessions[i];
- if (s)
- ceph_put_mds_session(s);
- }
- kfree(sessions);
- goto retry;
- }
if (!sessions[s->s_mds]) {
s = ceph_get_mds_session(s);
sessions[s->s_mds] = s;
@@ -2342,11 +2317,12 @@ retry:
/* the auth MDS */
spin_lock(&ci->i_ceph_lock);
if (ci->i_auth_cap) {
- s = ci->i_auth_cap->session;
- if (!sessions[s->s_mds])
- sessions[s->s_mds] = ceph_get_mds_session(s);
+ s = ci->i_auth_cap->session;
+ if (!sessions[s->s_mds])
+ sessions[s->s_mds] = ceph_get_mds_session(s);
}
spin_unlock(&ci->i_ceph_lock);
+ mutex_unlock(&mdsc->mutex);
/* send flush mdlog request to MDSes */
for (i = 0; i < max_sessions; i++) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4af5e55abc15..bad9eeb6a1a5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2492,7 +2492,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct inode *parent;
parent = ceph_lookup_inode(sb, ceph_ino(inode));
- if (!parent)
+ if (IS_ERR(parent))
return PTR_ERR(parent);
pci = ceph_inode(parent);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 864cdaa0d2bd..e4151852184e 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -763,7 +763,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
struct ceph_mds_snap_realm *ri; /* encoded */
__le64 *snaps; /* encoded */
__le64 *prior_parent_snaps; /* encoded */
- struct ceph_snap_realm *realm = NULL;
+ struct ceph_snap_realm *realm;
struct ceph_snap_realm *first_realm = NULL;
struct ceph_snap_realm *realm_to_rebuild = NULL;
int rebuild_snapcs;
@@ -774,6 +774,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
dout("%s deletion=%d\n", __func__, deletion);
more:
+ realm = NULL;
rebuild_snapcs = 0;
ceph_decode_need(&p, e, sizeof(*ri), bad);
ri = p;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index fe220686bba4..712a43161448 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1281,7 +1281,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
off + len - 1);
if (rc)
- goto out;
+ goto unlock;
/* should we flush first and last page first */
truncate_inode_pages(&target_inode->i_data, 0);
@@ -1297,6 +1297,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
* that target is updated on the server
*/
CIFS_I(target_inode)->time = 0;
+
+unlock:
/* although unlocking in the reverse order from locking is not
* strictly necessary here it is a little cleaner to be consistent
*/
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 1cc47dd3b4d6..9db9527c61cf 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3855,9 +3855,13 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
out:
- free_xid(mnt_ctx.xid);
cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
- return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ if (rc)
+ goto error;
+
+ free_xid(mnt_ctx.xid);
+ return rc;
error:
dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
@@ -3884,8 +3888,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
}
+ rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ if (rc)
+ goto error;
+
free_xid(mnt_ctx.xid);
- return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ return rc;
error:
mount_put_conns(&mnt_ctx);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 89d5fa887364..6419ec47c2a8 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -343,7 +343,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = put_user(ExtAttrBits &
FS_FL_USER_VISIBLE,
(int __user *)arg);
- if (rc != EOPNOTSUPP)
+ if (rc != -EOPNOTSUPP)
break;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
@@ -373,7 +373,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
* pSMBFile->fid.netfid,
* extAttrBits,
* &ExtAttrMask);
- * if (rc != EOPNOTSUPP)
+ * if (rc != -EOPNOTSUPP)
* break;
*/
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 92e4278ec35d..9e7d9f0baa18 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -302,14 +302,14 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
/* now drop the ref to the current iface */
if (old_iface && iface) {
- kref_put(&old_iface->refcount, release_iface);
cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
&old_iface->sockaddr,
&iface->sockaddr);
- } else if (old_iface) {
kref_put(&old_iface->refcount, release_iface);
+ } else if (old_iface) {
cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
&old_iface->sockaddr);
+ kref_put(&old_iface->refcount, release_iface);
} else {
WARN_ON(!iface);
cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 880cd494afea..bfaafd02fb1f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1116,6 +1116,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto sea_exit;
smb2_set_next_command(tcon, &rqst[1]);
smb2_set_related(&rqst[1]);
@@ -1126,6 +1128,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
rqst[2].rq_nvec = 1;
rc = SMB2_close_init(tcon, server,
&rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ if (rc)
+ goto sea_exit;
smb2_set_related(&rqst[2]);
rc = compound_send_recv(xid, ses, server,
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index fe05bc51f9f2..af5ed6b9c54d 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -75,11 +75,15 @@ static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
rcu_read_lock();
xas_for_each(&xas, folio, last_page) {
- unsigned int pgpos =
- (folio_index(folio) - start_page) * PAGE_SIZE;
- unsigned int pgend = pgpos + folio_size(folio);
+ unsigned int pgpos, pgend;
bool pg_failed = false;
+ if (xas_retry(&xas, folio))
+ continue;
+
+ pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+ pgend = pgpos + folio_size(folio);
+
for (;;) {
if (!subreq) {
pg_failed = true;
@@ -287,22 +291,25 @@ static int erofs_fscache_data_read(struct address_space *mapping,
return PTR_ERR(src);
iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
- if (copy_to_iter(src + offset, size, &iter) != size)
+ if (copy_to_iter(src + offset, size, &iter) != size) {
+ erofs_put_metabuf(&buf);
return -EFAULT;
+ }
iov_iter_zero(PAGE_SIZE - size, &iter);
erofs_put_metabuf(&buf);
return PAGE_SIZE;
}
- count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
- DBG_BUGON(!count || count % PAGE_SIZE);
-
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ count = len;
iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
iov_iter_zero(count, &iter);
return count;
}
+ count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
+ DBG_BUGON(!count || count % PAGE_SIZE);
+
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
@@ -403,13 +410,13 @@ static void erofs_fscache_domain_put(struct erofs_domain *domain)
static int erofs_fscache_register_volume(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- char *domain_id = sbi->opt.domain_id;
+ char *domain_id = sbi->domain_id;
struct fscache_volume *volume;
char *name;
int ret = 0;
name = kasprintf(GFP_KERNEL, "erofs,%s",
- domain_id ? domain_id : sbi->opt.fsid);
+ domain_id ? domain_id : sbi->fsid);
if (!name)
return -ENOMEM;
@@ -435,7 +442,7 @@ static int erofs_fscache_init_domain(struct super_block *sb)
if (!domain)
return -ENOMEM;
- domain->domain_id = kstrdup(sbi->opt.domain_id, GFP_KERNEL);
+ domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
if (!domain->domain_id) {
kfree(domain);
return -ENOMEM;
@@ -472,7 +479,7 @@ static int erofs_fscache_register_domain(struct super_block *sb)
mutex_lock(&erofs_domain_list_lock);
list_for_each_entry(domain, &erofs_domain_list, list) {
- if (!strcmp(domain->domain_id, sbi->opt.domain_id)) {
+ if (!strcmp(domain->domain_id, sbi->domain_id)) {
sbi->domain = domain;
sbi->volume = domain->volume;
refcount_inc(&domain->ref);
@@ -609,7 +616,7 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
char *name, bool need_inode)
{
- if (EROFS_SB(sb)->opt.domain_id)
+ if (EROFS_SB(sb)->domain_id)
return erofs_domain_register_cookie(sb, name, need_inode);
return erofs_fscache_acquire_cookie(sb, name, need_inode);
}
@@ -641,7 +648,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
- if (sbi->opt.domain_id)
+ if (sbi->domain_id)
ret = erofs_fscache_register_domain(sb);
else
ret = erofs_fscache_register_volume(sb);
@@ -649,7 +656,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
return ret;
/* acquired domain/volume will be relinquished in kill_sb() on error */
- fscache = erofs_fscache_register_cookie(sb, sbi->opt.fsid, true);
+ fscache = erofs_fscache_register_cookie(sb, sbi->fsid, true);
if (IS_ERR(fscache))
return PTR_ERR(fscache);
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1701df48c446..05dc68627722 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -75,8 +75,6 @@ struct erofs_mount_opts {
unsigned int max_sync_decompress_pages;
#endif
unsigned int mount_opt;
- char *fsid;
- char *domain_id;
};
struct erofs_dev_context {
@@ -89,6 +87,8 @@ struct erofs_dev_context {
struct erofs_fs_context {
struct erofs_mount_opts opt;
struct erofs_dev_context *devs;
+ char *fsid;
+ char *domain_id;
};
/* all filesystem-wide lz4 configurations */
@@ -170,6 +170,8 @@ struct erofs_sb_info {
struct fscache_volume *volume;
struct erofs_fscache *s_fscache;
struct erofs_domain *domain;
+ char *fsid;
+ char *domain_id;
};
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 2cf96ce1c32e..1c7dcca702b3 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -579,9 +579,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
break;
case Opt_fsid:
#ifdef CONFIG_EROFS_FS_ONDEMAND
- kfree(ctx->opt.fsid);
- ctx->opt.fsid = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->opt.fsid)
+ kfree(ctx->fsid);
+ ctx->fsid = kstrdup(param->string, GFP_KERNEL);
+ if (!ctx->fsid)
return -ENOMEM;
#else
errorfc(fc, "fsid option not supported");
@@ -589,9 +589,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
break;
case Opt_domain_id:
#ifdef CONFIG_EROFS_FS_ONDEMAND
- kfree(ctx->opt.domain_id);
- ctx->opt.domain_id = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->opt.domain_id)
+ kfree(ctx->domain_id);
+ ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
+ if (!ctx->domain_id)
return -ENOMEM;
#else
errorfc(fc, "domain_id option not supported");
@@ -728,10 +728,12 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_fs_info = sbi;
sbi->opt = ctx->opt;
- ctx->opt.fsid = NULL;
- ctx->opt.domain_id = NULL;
sbi->devs = ctx->devs;
ctx->devs = NULL;
+ sbi->fsid = ctx->fsid;
+ ctx->fsid = NULL;
+ sbi->domain_id = ctx->domain_id;
+ ctx->domain_id = NULL;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = EROFS_BLKSIZ;
@@ -820,7 +822,7 @@ static int erofs_fc_get_tree(struct fs_context *fc)
{
struct erofs_fs_context *ctx = fc->fs_private;
- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->opt.fsid)
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -834,6 +836,9 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
DBG_BUGON(!sb_rdonly(sb));
+ if (ctx->fsid || ctx->domain_id)
+ erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
+
if (test_opt(&ctx->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
@@ -873,8 +878,8 @@ static void erofs_fc_free(struct fs_context *fc)
struct erofs_fs_context *ctx = fc->fs_private;
erofs_free_dev_context(ctx->devs);
- kfree(ctx->opt.fsid);
- kfree(ctx->opt.domain_id);
+ kfree(ctx->fsid);
+ kfree(ctx->domain_id);
kfree(ctx);
}
@@ -944,8 +949,8 @@ static void erofs_kill_sb(struct super_block *sb)
erofs_free_dev_context(sbi->devs);
fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
- kfree(sbi->opt.fsid);
- kfree(sbi->opt.domain_id);
+ kfree(sbi->fsid);
+ kfree(sbi->domain_id);
kfree(sbi);
sb->s_fs_info = NULL;
}
@@ -1098,10 +1103,10 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
- if (opt->fsid)
- seq_printf(seq, ",fsid=%s", opt->fsid);
- if (opt->domain_id)
- seq_printf(seq, ",domain_id=%s", opt->domain_id);
+ if (sbi->fsid)
+ seq_printf(seq, ",fsid=%s", sbi->fsid);
+ if (sbi->domain_id)
+ seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
return 0;
}
diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c
index 783bb7b21b51..fd476961f742 100644
--- a/fs/erofs/sysfs.c
+++ b/fs/erofs/sysfs.c
@@ -210,14 +210,14 @@ int erofs_register_sysfs(struct super_block *sb)
int err;
if (erofs_is_fscache_mode(sb)) {
- if (sbi->opt.domain_id) {
- str = kasprintf(GFP_KERNEL, "%s,%s", sbi->opt.domain_id,
- sbi->opt.fsid);
+ if (sbi->domain_id) {
+ str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id,
+ sbi->fsid);
if (!str)
return -ENOMEM;
name = str;
} else {
- name = sbi->opt.fsid;
+ name = sbi->fsid;
}
} else {
name = sb->s_id;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 064a166324a7..b792d424d774 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -660,6 +660,9 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
u8 *src, *dst;
unsigned int i, cnt;
+ if (!packed_inode)
+ return -EFSCORRUPTED;
+
pos += EROFS_I(inode)->z_fragmentoff;
for (i = 0; i < len; i += cnt) {
cnt = min_t(unsigned int, len - i,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index f1956288307f..6c399a8b22b3 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5184,6 +5184,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
* and it is decreased till we reach start.
*/
again:
+ ret = 0;
if (SHIFT == SHIFT_LEFT)
iterator = &start;
else
@@ -5227,14 +5228,21 @@ again:
ext4_ext_get_actual_len(extent);
} else {
extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
- if (le32_to_cpu(extent->ee_block) > 0)
+ if (le32_to_cpu(extent->ee_block) > start)
*iterator = le32_to_cpu(extent->ee_block) - 1;
- else
- /* Beginning is reached, end of the loop */
+ else if (le32_to_cpu(extent->ee_block) == start)
iterator = NULL;
- /* Update path extent in case we need to stop */
- while (le32_to_cpu(extent->ee_block) < start)
+ else {
+ extent = EXT_LAST_EXTENT(path[depth].p_hdr);
+ while (le32_to_cpu(extent->ee_block) >= start)
+ extent--;
+
+ if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
+ break;
+
extent++;
+ iterator = NULL;
+ }
path[depth].p_ext = extent;
}
ret = ext4_ext_shift_path_extents(path, shift, inode,
diff --git a/fs/file.c b/fs/file.c
index 5f9c802a5d8d..c942c89ca4cd 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1003,7 +1003,16 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
struct files_struct *files = current->files;
struct file *file;
- if (atomic_read(&files->count) == 1) {
+ /*
+ * If another thread is concurrently calling close_fd() followed
+ * by put_files_struct(), we must not observe the old table
+ * entry combined with the new refcount - otherwise we could
+ * return a file that is concurrently being freed.
+ *
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * put_files_struct().
+ */
+ if (atomic_read_acquire(&files->count) == 1) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
return 0;
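The comment added above says the single-threaded fast path must use an acquire load of files->count so it cannot observe the dropped reference without also observing the table update that preceded it. A compact C11 sketch of that acquire/release pairing; the structures are stand-ins, not the kernel's fdtable.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int dummy;
static _Atomic int count = 2;                     /* stand-in for files->count */
static void *_Atomic table_entry = &dummy;        /* stand-in for a table slot */

static void *closer(void *arg)
{
    (void)arg;
    /* drop the table entry, then release the reference */
    atomic_store_explicit(&table_entry, NULL, memory_order_relaxed);
    atomic_fetch_sub_explicit(&count, 1, memory_order_release);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, closer, NULL);

    /* acquire pairs with the release decrement in closer(): observing
     * count == 1 guarantees the NULL table entry is also observed */
    if (atomic_load_explicit(&count, memory_order_acquire) == 1)
        printf("entry = %p (cannot be the stale pointer here)\n",
               atomic_load_explicit(&table_entry, memory_order_relaxed));
    else
        printf("count still 2, take the slow path\n");

    pthread_join(t, NULL);
    return 0;
}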
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 443f83382b9b..9958d4020771 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1712,18 +1712,26 @@ static int writeback_single_inode(struct inode *inode,
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
- * If the inode is now fully clean, then it can be safely removed from
- * its writeback list (if any). Otherwise the flusher threads are
- * responsible for the writeback lists.
+ * If the inode is being freed, its i_io_list shouldn't be updated
+ * as it may be deleted at any moment.
*/
- if (!(inode->i_state & I_DIRTY_ALL))
- inode_cgwb_move_to_attached(inode, wb);
- else if (!(inode->i_state & I_SYNC_QUEUED)) {
- if ((inode->i_state & I_DIRTY))
- redirty_tail_locked(inode, wb);
- else if (inode->i_state & I_DIRTY_TIME) {
- inode->dirtied_when = jiffies;
- inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
+ if (!(inode->i_state & I_FREEING)) {
+ /*
+ * If the inode is now fully clean, then it can be safely
+ * removed from its writeback list (if any). Otherwise the
+ * flusher threads are responsible for the writeback lists.
+ */
+ if (!(inode->i_state & I_DIRTY_ALL))
+ inode_cgwb_move_to_attached(inode, wb);
+ else if (!(inode->i_state & I_SYNC_QUEUED)) {
+ if ((inode->i_state & I_DIRTY))
+ redirty_tail_locked(inode, wb);
+ else if (inode->i_state & I_DIRTY_TIME) {
+ inode->dirtied_when = jiffies;
+ inode_io_list_move_locked(inode,
+ wb,
+ &wb->b_dirty_time);
+ }
}
}
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
index a058e0136bfe..ab8ceddf9efa 100644
--- a/fs/fscache/volume.c
+++ b/fs/fscache/volume.c
@@ -203,7 +203,11 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
struct fscache_volume *volume;
struct fscache_cache *cache;
size_t klen, hlen;
- char *key;
+ u8 *key;
+
+ klen = strlen(volume_key);
+ if (klen > NAME_MAX)
+ return NULL;
if (!coherency_data)
coherency_len = 0;
@@ -229,7 +233,6 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
/* Stick the length on the front of the key and pad it out to make
* hashing easier.
*/
- klen = strlen(volume_key);
hlen = round_up(1 + klen + 1, sizeof(__le32));
key = kzalloc(hlen, GFP_KERNEL);
if (!key)
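The fscache hardening above rejects volume keys longer than NAME_MAX before the key is length-prefixed and padded out to a __le32 multiple for hashing. A short userspace sketch of that key layout; NAME_MAX is taken as 255 and the round-up mirrors the allocation in the hunk, while everything else is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX 255
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

/* Build a length-prefixed, zero-padded key the way the allocation above does. */
static uint8_t *build_volume_key(const char *volume_key, size_t *hlen_out)
{
    size_t klen = strlen(volume_key);
    size_t hlen;
    uint8_t *key;

    if (klen > NAME_MAX)
        return NULL;                    /* reject over-long keys up front */

    hlen = ROUND_UP(1 + klen + 1, sizeof(uint32_t));
    key = calloc(1, hlen);
    if (!key)
        return NULL;
    key[0] = (uint8_t)klen;             /* length byte, then the key itself */
    memcpy(key + 1, volume_key, klen);
    *hlen_out = hlen;
    return key;
}

int main(void)
{
    size_t hlen = 0;
    uint8_t *key = build_volume_key("erofs,myfsid", &hlen);

    if (key)
        printf("padded key length: %zu bytes (klen=%u)\n", hlen, (unsigned)key[0]);
    free(key);
    return 0;
}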
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 71bfb663aac5..89f4741728ba 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2963,11 +2963,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
.mode = mode
};
int err;
- bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & (FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_ZERO_RANGE));
-
- bool block_faults = FUSE_IS_DAX(inode) && lock_inode;
+ bool block_faults = FUSE_IS_DAX(inode) &&
+ (!(mode & FALLOC_FL_KEEP_SIZE) ||
+ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_ZERO_RANGE))
@@ -2976,22 +2974,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (fm->fc->no_fallocate)
return -EOPNOTSUPP;
- if (lock_inode) {
- inode_lock(inode);
- if (block_faults) {
- filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
- if (err)
- goto out;
- }
+ inode_lock(inode);
+ if (block_faults) {
+ filemap_invalidate_lock(inode->i_mapping);
+ err = fuse_dax_break_layouts(inode, 0, 0);
+ if (err)
+ goto out;
+ }
- if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
- loff_t endbyte = offset + length - 1;
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
+ loff_t endbyte = offset + length - 1;
- err = fuse_writeback_range(inode, offset, endbyte);
- if (err)
- goto out;
- }
+ err = fuse_writeback_range(inode, offset, endbyte);
+ if (err)
+ goto out;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -3039,8 +3035,7 @@ out:
if (block_faults)
filemap_invalidate_unlock(inode->i_mapping);
- if (lock_inode)
- inode_unlock(inode);
+ inode_unlock(inode);
fuse_flush_time_update(inode);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 3990f3e270cb..f33b3baad07c 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -31,10 +31,15 @@ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
+static bool __kernfs_active(struct kernfs_node *kn)
+{
+ return atomic_read(&kn->active) >= 0;
+}
+
static bool kernfs_active(struct kernfs_node *kn)
{
lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
- return atomic_read(&kn->active) >= 0;
+ return __kernfs_active(kn);
}
static bool kernfs_lockdep(struct kernfs_node *kn)
@@ -705,7 +710,12 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
goto err_unlock;
}
- if (unlikely(!kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
+ /*
+ * We should fail if @kn has never been activated and guarantee success
+ * if the caller knows that @kn is active. Both can be achieved by
+ * __kernfs_active() which tests @kn->active without kernfs_rwsem.
+ */
+ if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
goto err_unlock;
spin_unlock(&kernfs_idr_lock);
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index 8de970d6146f..94b8ed4ef870 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -1794,9 +1794,9 @@ int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
ret = vfs_copy_file_range(src_fp->filp, src_off,
dst_fp->filp, dst_off, len, 0);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src_fp->filp, src_off,
- dst_fp->filp, dst_off,
- len, 0);
+ ret = vfs_copy_file_range(src_fp->filp, src_off,
+ dst_fp->filp, dst_off, len,
+ COPY_FILE_SPLICE);
if (ret < 0)
return ret;
diff --git a/fs/namei.c b/fs/namei.c
index 578c2110df02..9155ecb547ce 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3591,6 +3591,7 @@ static int vfs_tmpfile(struct user_namespace *mnt_userns,
struct inode *dir = d_inode(parentpath->dentry);
struct inode *inode;
int error;
+ int open_flag = file->f_flags;
/* we want directory to be writable */
error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
@@ -3613,7 +3614,7 @@ static int vfs_tmpfile(struct user_namespace *mnt_userns,
if (error)
return error;
inode = file_inode(file);
- if (!(file->f_flags & O_EXCL)) {
+ if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 0ce535852151..7679a68e8193 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -17,9 +17,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
struct folio *folio;
- unsigned int iopos, account = 0;
pgoff_t start_page = rreq->start / PAGE_SIZE;
pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+ size_t account = 0;
bool subreq_failed = false;
XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -39,18 +39,23 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
*/
subreq = list_first_entry(&rreq->subrequests,
struct netfs_io_subrequest, rreq_link);
- iopos = 0;
subreq_failed = (subreq->error < 0);
trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
rcu_read_lock();
xas_for_each(&xas, folio, last_page) {
- unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
- unsigned int pgend = pgpos + folio_size(folio);
+ loff_t pg_end;
bool pg_failed = false;
+ if (xas_retry(&xas, folio))
+ continue;
+
+ pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
for (;;) {
+ loff_t sreq_end;
+
if (!subreq) {
pg_failed = true;
break;
@@ -58,11 +63,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
folio_start_fscache(folio);
pg_failed |= subreq_failed;
- if (pgend < iopos + subreq->len)
+ sreq_end = subreq->start + subreq->len - 1;
+ if (pg_end < sreq_end)
break;
account += subreq->transferred;
- iopos += subreq->len;
if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
subreq = list_next_entry(subreq, rreq_link);
subreq_failed = (subreq->error < 0);
@@ -70,7 +75,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
subreq = NULL;
subreq_failed = false;
}
- if (pgend == iopos)
+
+ if (pg_end == sreq_end)
break;
}
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 428925899282..e374767d1b68 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -121,6 +121,9 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+ if (xas_retry(&xas, folio))
+ continue;
+
/* We might have multiple writes from the same huge
* folio, but we mustn't unlock a folio more than once.
*/
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 06a96e955bd0..d4b6839bb459 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -254,7 +254,10 @@ TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
rqstp->rq_xprt->xpt_remotelen);
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
- __entry->inode = d_inode(fhp->fh_dentry);
+ if (fhp->fh_dentry)
+ __entry->inode = d_inode(fhp->fh_dentry);
+ else
+ __entry->inode = NULL;
__entry->type = type;
__entry->access = access;
__entry->error = be32_to_cpu(error);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index f650afedd67f..849a720ab43f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -596,8 +596,8 @@ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src, src_pos, dst, dst_pos,
- count, 0);
+ ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
+ COPY_FILE_SPLICE);
return ret;
}
@@ -871,10 +871,11 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct svc_rqst *rqstp = sd->u.data;
struct page *page = buf->page; // may be a compound one
unsigned offset = buf->offset;
+ struct page *last_page;
- page += offset / PAGE_SIZE;
- for (int i = sd->len; i > 0; i -= PAGE_SIZE)
- svc_rqst_replace_page(rqstp, page++);
+ last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
+ for (page += offset / PAGE_SIZE; page <= last_page; page++)
+ svc_rqst_replace_page(rqstp, page);
if (rqstp->rq_res.page_len == 0) // first call
rqstp->rq_res.page_base = offset % PAGE_SIZE;
rqstp->rq_res.page_len += sd->len;
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 3b55e239705f..9930fa901039 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -111,6 +111,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
+
+ if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) {
+ nilfs_error(dat->i_sb,
+ "state inconsistency probably due to duplicate use of vblocknr = %llu",
+ (unsigned long long)req->pr_entry_nr);
+ return;
+ }
nilfs_palloc_commit_free_entry(dat, req);
}
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 77ff8e95421f..dc359b56fdfa 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -495,14 +495,22 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
struct buffer_head *bh;
+ void *kaddr;
+ struct nilfs_segment_usage *su;
int ret;
+ down_write(&NILFS_MDT(sufile)->mi_sem);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
if (!ret) {
mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile);
+ kaddr = kmap_atomic(bh->b_page);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ nilfs_segment_usage_set_dirty(su);
+ kunmap_atomic(kaddr);
brelse(bh);
}
+ up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 5101131e6047..440960110a42 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -115,7 +115,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#endif
show_val_kb(m, "PageTables: ",
global_node_page_state(NR_PAGETABLE));
- show_val_kb(m, "SecPageTables: ",
+ show_val_kb(m, "SecPageTables: ",
global_node_page_state(NR_SECONDARY_PAGETABLE));
show_val_kb(m, "NFS_Unstable: ", 0);
diff --git a/fs/read_write.c b/fs/read_write.c
index 328ce8cf9a85..24b9668d6377 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1388,6 +1388,8 @@ ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t len, unsigned int flags)
{
+ lockdep_assert(sb_write_started(file_inode(file_out)->i_sb));
+
return do_splice_direct(file_in, &pos_in, file_out, &pos_out,
len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
}
@@ -1424,7 +1426,9 @@ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
* and several different sets of file_operations, but they all end up
* using the same ->copy_file_range() function pointer.
*/
- if (file_out->f_op->copy_file_range) {
+ if (flags & COPY_FILE_SPLICE) {
+ /* cross sb splice is allowed */
+ } else if (file_out->f_op->copy_file_range) {
if (file_in->f_op->copy_file_range !=
file_out->f_op->copy_file_range)
return -EXDEV;
@@ -1474,8 +1478,9 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
size_t len, unsigned int flags)
{
ssize_t ret;
+ bool splice = flags & COPY_FILE_SPLICE;
- if (flags != 0)
+ if (flags & ~COPY_FILE_SPLICE)
return -EINVAL;
ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
@@ -1501,14 +1506,14 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
* same sb using clone, but for filesystems where both clone and copy
* are supported (e.g. nfs,cifs), we only call the copy method.
*/
- if (file_out->f_op->copy_file_range) {
+ if (!splice && file_out->f_op->copy_file_range) {
ret = file_out->f_op->copy_file_range(file_in, pos_in,
file_out, pos_out,
len, flags);
goto done;
}
- if (file_in->f_op->remap_file_range &&
+ if (!splice && file_in->f_op->remap_file_range &&
file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
ret = file_in->f_op->remap_file_range(file_in, pos_in,
file_out, pos_out,
@@ -1528,6 +1533,8 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
* consistent story about which filesystems support copy_file_range()
* and which filesystems do not, that will allow userspace tools to
* make consistent decisions w.r.t. using copy_file_range().
+ *
+ * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE.
*/
ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
flags);
@@ -1582,6 +1589,10 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
pos_out = f_out.file->f_pos;
}
+ ret = -EINVAL;
+ if (flags != 0)
+ goto out;
+
ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
flags);
if (ret > 0) {
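
For context: COPY_FILE_SPLICE is a kernel-internal flag (nfsd passes it down to vfs_copy_file_range()); the copy_file_range(2) syscall itself still rejects any non-zero flags, as the hunk above enforces. A minimal user-space sketch of that visible behaviour, assuming glibc's copy_file_range() wrapper (glibc >= 2.27) and placeholder file names src.txt/dst.txt:

/* Illustrative user-space sketch only, not part of the patch:
 * flags must be 0 from user space; COPY_FILE_SPLICE exists only
 * inside the kernel.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int in = open("src.txt", O_RDONLY);                       /* placeholder source */
	int out = open("dst.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (in < 0 || out < 0)
		return 1;

	/* flags == 0 is the only value user space may pass ... */
	n = copy_file_range(in, NULL, out, NULL, 65536, 0);
	printf("copied %zd bytes\n", n);

	/* ... anything else is rejected with EINVAL. */
	n = copy_file_range(in, NULL, out, NULL, 65536, 1);
	if (n < 0 && errno == EINVAL)
		printf("non-zero flags rejected as expected\n");

	close(in);
	close(out);
	return 0;
}
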
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 860f0b1032c6..2c53fbb8d918 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -41,6 +41,13 @@ static void zonefs_account_active(struct inode *inode)
return;
/*
+ * For zones that transitioned to the offline or readonly condition,
+ * we only need to clear the active state.
+ */
+ if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+ goto out;
+
+ /*
* If the zone is active, that is, if it is explicitly open or
* partially written, check if it was already accounted as active.
*/
@@ -53,6 +60,7 @@ static void zonefs_account_active(struct inode *inode)
return;
}
+out:
/* The zone is not active. If it was, update the active count */
if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
@@ -324,6 +332,7 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
inode->i_flags |= S_IMMUTABLE;
inode->i_mode &= ~0777;
zone->wp = zone->start;
+ zi->i_flags |= ZONEFS_ZONE_OFFLINE;
return 0;
case BLK_ZONE_COND_READONLY:
/*
@@ -342,8 +351,10 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
zone->cond = BLK_ZONE_COND_OFFLINE;
inode->i_mode &= ~0777;
zone->wp = zone->start;
+ zi->i_flags |= ZONEFS_ZONE_OFFLINE;
return 0;
}
+ zi->i_flags |= ZONEFS_ZONE_READONLY;
inode->i_mode &= ~0222;
return i_size_read(inode);
case BLK_ZONE_COND_FULL:
@@ -478,8 +489,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int noio_flag;
- unsigned int nr_zones =
- zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ unsigned int nr_zones = 1;
struct zonefs_ioerr_data err = {
.inode = inode,
.write = write,
@@ -487,6 +497,15 @@ static void __zonefs_io_error(struct inode *inode, bool write)
int ret;
/*
+ * The only files that have more than one zone are conventional zone
+ * files with aggregated conventional zones, for which the inode zone
+ * size is always larger than the device zone size.
+ */
+ if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
+ nr_zones = zi->i_zone_size >>
+ (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+
+ /*
* Memory allocations in blkdev_report_zones() can trigger a memory
* reclaim which may in turn cause a recursion into zonefs as well as
* struct request allocations for the same device. The former case may
@@ -1407,6 +1426,14 @@ static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
zi->i_ztype = type;
zi->i_zsector = zone->start;
zi->i_zone_size = zone->len << SECTOR_SHIFT;
+ if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+ !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+ zonefs_err(sb,
+ "zone size %llu doesn't match device's zone sectors %llu\n",
+ zi->i_zone_size,
+ bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+ return -EINVAL;
+ }
zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
zone->capacity << SECTOR_SHIFT);
@@ -1456,11 +1483,11 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
struct inode *dir = d_inode(parent);
struct dentry *dentry;
struct inode *inode;
- int ret;
+ int ret = -ENOMEM;
dentry = d_alloc_name(parent, name);
if (!dentry)
- return NULL;
+ return ERR_PTR(ret);
inode = new_inode(parent->d_sb);
if (!inode)
@@ -1485,7 +1512,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
dput:
dput(dentry);
- return NULL;
+ return ERR_PTR(ret);
}
struct zonefs_zone_data {
@@ -1505,7 +1532,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
struct blk_zone *zone, *next, *end;
const char *zgroup_name;
char *file_name;
- struct dentry *dir;
+ struct dentry *dir, *dent;
unsigned int n = 0;
int ret;
@@ -1523,8 +1550,8 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
zgroup_name = "seq";
dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
- if (!dir) {
- ret = -ENOMEM;
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
goto free;
}
@@ -1570,8 +1597,9 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
* Use the file number within its group as file name.
*/
snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
- if (!zonefs_create_inode(dir, file_name, zone, type)) {
- ret = -ENOMEM;
+ dent = zonefs_create_inode(dir, file_name, zone, type);
+ if (IS_ERR(dent)) {
+ ret = PTR_ERR(dent);
goto free;
}
@@ -1905,18 +1933,18 @@ static int __init zonefs_init(void)
if (ret)
return ret;
- ret = register_filesystem(&zonefs_type);
+ ret = zonefs_sysfs_init();
if (ret)
goto destroy_inodecache;
- ret = zonefs_sysfs_init();
+ ret = register_filesystem(&zonefs_type);
if (ret)
- goto unregister_fs;
+ goto sysfs_exit;
return 0;
-unregister_fs:
- unregister_filesystem(&zonefs_type);
+sysfs_exit:
+ zonefs_sysfs_exit();
destroy_inodecache:
zonefs_destroy_inodecache();
@@ -1925,9 +1953,9 @@ destroy_inodecache:
static void __exit zonefs_exit(void)
{
+ unregister_filesystem(&zonefs_type);
zonefs_sysfs_exit();
zonefs_destroy_inodecache();
- unregister_filesystem(&zonefs_type);
}
MODULE_AUTHOR("Damien Le Moal");
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
index 9cb6755ce39a..9920689dc098 100644
--- a/fs/zonefs/sysfs.c
+++ b/fs/zonefs/sysfs.c
@@ -15,11 +15,6 @@ struct zonefs_sysfs_attr {
ssize_t (*show)(struct zonefs_sb_info *sbi, char *buf);
};
-static inline struct zonefs_sysfs_attr *to_attr(struct attribute *attr)
-{
- return container_of(attr, struct zonefs_sysfs_attr, attr);
-}
-
#define ZONEFS_SYSFS_ATTR_RO(name) \
static struct zonefs_sysfs_attr zonefs_sysfs_attr_##name = __ATTR_RO(name)
diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
index 4b3de66c3233..1dbe78119ff1 100644
--- a/fs/zonefs/zonefs.h
+++ b/fs/zonefs/zonefs.h
@@ -39,8 +39,10 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
return ZONEFS_ZTYPE_SEQ;
}
-#define ZONEFS_ZONE_OPEN (1 << 0)
-#define ZONEFS_ZONE_ACTIVE (1 << 1)
+#define ZONEFS_ZONE_OPEN (1U << 0)
+#define ZONEFS_ZONE_ACTIVE (1U << 1)
+#define ZONEFS_ZONE_OFFLINE (1U << 2)
+#define ZONEFS_ZONE_READONLY (1U << 3)
/*
* In-memory inode data.
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 492dce43236e..cab7cfebf40b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,12 +222,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50e358a19d98..891f8cbcd043 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
unsigned long nr_requests; /* Max # of requests */
unsigned int dma_pad_mask;
- /*
- * Drivers that set dma_alignment to less than 511 must be prepared to
- * handle individual bvec's that are not a multiple of a SECTOR_SIZE
- * due to possible offsets.
- */
- unsigned int dma_alignment;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -944,7 +945,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
@@ -1324,7 +1324,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
static inline int queue_dma_alignment(const struct request_queue *q)
{
- return q ? q->dma_alignment : 511;
+ return q ? q->limits.dma_alignment : 511;
}
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0566705c1d4e..c1bd1bd10506 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -27,7 +27,7 @@
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -315,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
u32 next_off = map->off_arr->field_off[i];
memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
@@ -344,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
u32 next_off = map->off_arr->field_off[i];
memset(dst + curr_off, 0, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memset(dst + curr_off, 0, map->value_size - curr_off);
}
@@ -954,6 +954,10 @@ struct bpf_dispatcher {
void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
};
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -971,7 +975,33 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
-int __init bpf_arch_init_dispatcher_early(void *ip);
+
+/*
+ * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
@@ -984,34 +1014,21 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
.name = #_name, \
.lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
}, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
}
-#define BPF_DISPATCHER_INIT_CALL(_name) \
- static int __init _name##_init(void) \
- { \
- return bpf_arch_init_dispatcher_early(_name##_func); \
- } \
- early_initcall(_name##_init)
-
-#ifdef CONFIG_X86_64
-#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
-#else
-#define BPF_DISPATCHER_ATTRIBUTES
-#endif
-
#define DEFINE_BPF_DISPATCHER(name) \
- notrace BPF_DISPATCHER_ATTRIBUTES \
+ __BPF_DISPATCHER_SC(name); \
noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
{ \
- return bpf_func(ctx, insnsi); \
+ return __BPF_DISPATCHER_CALL(name); \
} \
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
struct bpf_dispatcher bpf_dispatcher_##name = \
- BPF_DISPATCHER_INIT(bpf_dispatcher_##name); \
- BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int bpf_dispatcher_##name##_func( \
@@ -1019,6 +1036,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func); \
extern struct bpf_dispatcher bpf_dispatcher_##name;
+
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 9f6e25467844..444236dadcf0 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,7 +20,6 @@ struct fault_attr {
atomic_t space;
unsigned long verbose;
bool task_filter;
- bool no_warn;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
@@ -32,6 +31,10 @@ struct fault_attr {
struct dentry *dname;
};
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -40,11 +43,11 @@ struct fault_attr {
.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
.verbose = 2, \
.dname = NULL, \
- .no_warn = false, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e654435f1651..59ae95ddb679 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2089,6 +2089,14 @@ struct dir_context {
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
+
struct iov_iter;
struct io_uring_cmd;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 36e5dd84cf59..8e312c8323a8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -75,7 +75,7 @@ struct fscache_volume {
atomic_t n_accesses; /* Number of cache accesses in progress */
unsigned int debug_id;
unsigned int key_hash; /* Hash of key string */
- char *key; /* Volume ID, eg. "afs@example.com@1234" */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
struct hlist_bl_node hash_link; /* Link in hash table */
struct work_struct work;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ef4aea3b356e..65a78773dcca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -210,6 +210,20 @@ alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct p
return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
+{
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
+}
+
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
@@ -218,7 +232,7 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
return __alloc_pages(gfp_mask, order, nid, NULL);
}
@@ -227,7 +241,7 @@ static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp);
return __folio_alloc(gfp, order, nid, NULL);
}
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 43bc8a2edccf..0ded9e271523 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -16,6 +16,9 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = 4,
IO_URING_F_CQE32 = 8,
IO_URING_F_IOPOLL = 16,
+
+ /* the request is executed from poll; it should not be freed */
+ IO_URING_F_MULTISHOT = 32,
};
struct io_uring_cmd {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 18592bdf4c1b..637a60607c7d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -776,6 +776,7 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index af2ceb4160bc..06cbad166225 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -981,6 +981,7 @@ struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
void *out; /* pointer to the cmd output buffer */
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..974ccca609d2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1852,6 +1852,25 @@ static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodem
__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
}
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
+
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
@@ -1869,6 +1888,8 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end);
@@ -3467,12 +3488,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
-/*
- * Whether to drop the pte markers, for example, the uffd-wp information for
- * file-backed memory. This should only be specified when we will completely
- * drop the page in the mm, either by truncation or unmapping of the vma. By
- * default, the flag is not set.
- */
-#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
-
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 9c50bc40f8ff..6f7993803ee7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -451,7 +451,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a108b60a6962..5f0d7d0b9471 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -165,6 +165,13 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -260,6 +267,17 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
#ifndef arch_has_hw_pte_young
/*
* Return whether the accessed bit is supported on the local CPU.
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 2504df9a0453..3c7d295746f6 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
+ struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b5e16e438448..80ffda871749 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -26,13 +26,13 @@ struct trace_export {
int flags;
};
+struct trace_array;
+
#ifdef CONFIG_TRACING
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
-struct trace_array;
-
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e7cebeb875dd..fdd393f70b19 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -189,6 +189,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
int vfio_mig_get_next_state(struct vfio_device *device,
enum vfio_device_mig_state cur_fsm,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 3af1e927247d..69174093078f 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -281,7 +281,8 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
* sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
* rcv_saddr field should already have been updated when this is called.
*/
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
struct inet_bind2_bucket *tb2, unsigned short port);
diff --git a/include/net/ip.h b/include/net/ip.h
index 038097c2a152..144bdfbb25af 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -563,7 +563,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
offsetof(typeof(flow->addrs), v4addrs.src) +
sizeof(flow->addrs.v4addrs.src));
- memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 37943ba3a73c..d383c895592a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -897,7 +897,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
offsetof(typeof(flow->addrs), v6addrs.src) +
sizeof(flow->addrs.v6addrs.src));
- memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 20745cf7ae1a..2f2a6023fb0e 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -83,7 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
- int qlen;
+ u32 qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 01a70b27e026..65058faea4db 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -26,6 +26,8 @@ struct sctp_sched_ops {
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
/* Frees the entire thing */
void (*free)(struct sctp_stream *stream);
diff --git a/include/net/sock.h b/include/net/sock.h
index 5db02546941c..e0517ecc6531 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -323,7 +323,7 @@ struct sk_filter;
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
index 6ce3bd22f6c6..5ad7ac2e3a7c 100644
--- a/include/soc/at91/sama7-ddr.h
+++ b/include/soc/at91/sama7-ddr.h
@@ -26,7 +26,10 @@
#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */
#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */
-#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
+#define DDR3PHY_ACDLLCR (0x14) /* DDR3PHY AC DLL Control Register */
+#define DDR3PHY_ACDLLCR_DLLSRST (1 << 30) /* DLL Soft Reset */
+
+#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */
#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */
#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 83fd81c82e4c..9fbd3832bcdc 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -84,8 +84,8 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_BT, /**< AMD ACP BT*/
SOF_DAI_AMD_SP, /**< AMD ACP SP */
SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
- SOF_DAI_AMD_HS, /**< Amd HS */
SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
+ SOF_DAI_AMD_HS, /**< Amd HS */
};
/* general purpose DAI configuration */
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 65e86e4e9fd8..75193850ead0 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -36,6 +36,10 @@ enum sof_ipc_ext_data {
SOF_IPC_EXT_USER_ABI_INFO = 4,
};
+/* Build u32 number in format MMmmmppp */
+#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
+ ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))
+
/* FW version - SOF_IPC_GLB_VERSION */
struct sof_ipc_fw_version {
struct sof_ipc_hdr hdr;
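
The new SOF_FW_VER() macro packs the firmware version as 0xMMmmmppp (8-bit major, 12-bit minor, 12-bit patch). A stand-alone sketch of the encoding, reusing the macro from the hunk above (illustrative only):

/* Stand-alone sketch of the SOF_FW_VER() encoding added above. */
#include <stdint.h>
#include <stdio.h>

#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
	((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))

int main(void)
{
	uint32_t v = SOF_FW_VER(2, 2, 0);

	printf("SOF_FW_VER(2, 2, 0) = 0x%08x\n", v);          /* 0x02002000 */
	printf("major=%u minor=%u patch=%u\n",
	       v >> 24, (v >> 12) & 0xfff, v & 0xfff);
	return 0;
}
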
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 935af4947917..760455dfa860 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -171,15 +171,15 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
TRACE_EVENT(mm_khugepaged_scan_file,
- TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
int present, int swap, int result),
- TP_ARGS(mm, page, filename, present, swap, result),
+ TP_ARGS(mm, page, file, present, swap, result),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __string(filename, filename)
+ __string(filename, file->f_path.dentry->d_iname)
__field(int, present)
__field(int, swap)
__field(int, result)
@@ -188,7 +188,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TP_fast_assign(
__entry->mm = mm;
__entry->pfn = page ? page_to_pfn(page) : -1;
- __assign_str(filename, filename);
+ __assign_str(filename, file->f_path.dentry->d_iname);
__entry->present = present;
__entry->swap = swap;
__entry->result = result;
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 961ec16a26b8..874a92349bf5 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -100,8 +100,10 @@ struct iphdr {
__u8 ttl;
__u8 protocol;
__sum16 check;
- __be32 saddr;
- __be32 daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ __be32 saddr;
+ __be32 daddr;
+ );
/*The options start here. */
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 03cdbe798fe3..81f4243bebb1 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -130,8 +130,10 @@ struct ipv6hdr {
__u8 nexthdr;
__u8 hop_limit;
- struct in6_addr saddr;
- struct in6_addr daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ );
};
diff --git a/init/Kconfig b/init/Kconfig
index abf65098f1b6..94125d3b6893 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -87,7 +87,7 @@ config CC_HAS_ASM_GOTO_OUTPUT
config CC_HAS_ASM_GOTO_TIED_OUTPUT
depends on CC_HAS_ASM_GOTO_OUTPUT
# Detect buggy gcc and clang, fixed in gcc-11 clang-14.
- def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+ def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
config TOOLS_SUPPORT_RELR
def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index 7b473259f3f4..68dfc6936aa7 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -101,8 +101,6 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
err:
if (needs_switch)
io_rsrc_node_switch(ctx, ctx->file_data);
- if (ret)
- fput(file);
return ret;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4a1e482747cc..8840cf3e20f2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1768,7 +1768,7 @@ int io_poll_issue(struct io_kiocb *req, bool *locked)
io_tw_lock(req->ctx, locked);
if (unlikely(req->task->flags & PF_EXITING))
return -EFAULT;
- return io_issue_sqe(req, IO_URING_F_NONBLOCK);
+ return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
}
struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e99a79f2df9b..50bc3af44953 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -17,8 +17,8 @@ enum {
IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
/*
- * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
- * are set to indicate to the poll runner that multishot should be
+ * Intended only when IO_URING_F_MULTISHOT is passed
+ * to indicate to the poll runner that multishot should be
* removed and the result is set on req->cqe.res.
*/
IOU_STOP_MULTISHOT = -ECANCELED,
@@ -238,9 +238,14 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
static inline int io_run_task_work(void)
{
+ /*
+ * Always check-and-clear the task_work notification signal. With how
+ * signaling works for task_work, we can find it set with nothing to
+ * run. We need to clear it for that case, like get_signal() does.
+ */
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+ clear_notify_signal();
if (task_work_pending(current)) {
- if (test_thread_flag(TIF_NOTIFY_SIGNAL))
- clear_notify_signal();
__set_current_state(TASK_RUNNING);
task_work_run();
return 1;
diff --git a/io_uring/net.c b/io_uring/net.c
index 15dea91625e2..ab83da7e80f0 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -67,8 +67,6 @@ struct io_sr_msg {
struct io_kiocb *notif;
};
-#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
-
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -591,7 +589,8 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
* again (for multishot).
*/
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
- unsigned int cflags, bool mshot_finished)
+ unsigned int cflags, bool mshot_finished,
+ unsigned issue_flags)
{
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, *ret, cflags);
@@ -614,7 +613,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
io_req_set_res(req, *ret, cflags);
- if (req->flags & REQ_F_POLLED)
+ if (issue_flags & IO_URING_F_MULTISHOT)
*ret = IOU_STOP_MULTISHOT;
else
*ret = IOU_OK;
@@ -773,8 +772,7 @@ retry_multishot:
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
ret = io_setup_async_msg(req, kmsg, issue_flags);
- if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
- IO_APOLL_MULTI_POLLED) {
+ if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -803,7 +801,7 @@ retry_multishot:
if (kmsg->msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!io_recv_finish(req, &ret, cflags, mshot_finished))
+ if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished) {
@@ -869,7 +867,7 @@ retry_multishot:
ret = sock_recvmsg(sock, &msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
+ if (issue_flags & IO_URING_F_MULTISHOT) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -902,7 +900,7 @@ out_free:
if (msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!io_recv_finish(req, &ret, cflags, ret <= 0))
+ if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
goto retry_multishot;
return ret;
@@ -1289,8 +1287,7 @@ retry:
* return EAGAIN to arm the poll infra since it
* has already been done
*/
- if ((req->flags & IO_APOLL_MULTI_POLLED) ==
- IO_APOLL_MULTI_POLLED)
+ if (issue_flags & IO_URING_F_MULTISHOT)
ret = IOU_ISSUE_SKIP_COMPLETE;
return ret;
}
@@ -1315,9 +1312,7 @@ retry:
goto retry;
io_req_set_res(req, ret, 0);
- if (req->flags & REQ_F_POLLED)
- return IOU_STOP_MULTISHOT;
- return IOU_OK;
+ return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index f500506984ec..d9bf1767867e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -40,7 +40,14 @@ struct io_poll_table {
};
#define IO_POLL_CANCEL_FLAG BIT(31)
-#define IO_POLL_REF_MASK GENMASK(30, 0)
+#define IO_POLL_RETRY_FLAG BIT(30)
+#define IO_POLL_REF_MASK GENMASK(29, 0)
+
+/*
+ * We usually have 1-2 refs taken; 128 is more than enough, and we want to
+ * maximise the margin between this amount and the moment when it overflows.
+ */
+#define IO_POLL_REF_BIAS 128
#define IO_WQE_F_DOUBLE 1
@@ -58,6 +65,21 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe)
return priv & IO_WQE_F_DOUBLE;
}
+static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
+{
+ int v;
+
+ /*
+ * poll_refs are already elevated and we don't have much hope for
+ * grabbing the ownership. Instead of incrementing set a retry flag
+ * to notify the loop that there might have been some change.
+ */
+ v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
+ if (v & IO_POLL_REF_MASK)
+ return false;
+ return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
+}
+
/*
* If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
* bump it and acquire ownership. It's disallowed to modify requests while not
@@ -66,6 +88,8 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe)
*/
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
+ if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
+ return io_poll_get_ownership_slowpath(req);
return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
@@ -228,6 +252,23 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
return IOU_POLL_DONE;
if (v & IO_POLL_CANCEL_FLAG)
return -ECANCELED;
+ /*
+ * cqe.res contains only events of the first wakeup
+ * and all others are lost. Redo vfs_poll() to get
+ * up-to-date state.
+ */
+ if ((v & IO_POLL_REF_MASK) != 1)
+ req->cqe.res = 0;
+ if (v & IO_POLL_RETRY_FLAG) {
+ req->cqe.res = 0;
+ /*
+ * We won't find new events that came in between
+ * vfs_poll and the ref put unless we clear the flag
+ * in advance.
+ */
+ atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+ v &= ~IO_POLL_RETRY_FLAG;
+ }
/* the mask was stashed in __io_poll_execute */
if (!req->cqe.res) {
@@ -239,6 +280,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
continue;
if (req->apoll_events & EPOLLONESHOT)
return IOU_POLL_DONE;
+ if (io_is_uring_fops(req->file))
+ return IOU_POLL_DONE;
/* multishot, just fill a CQE and proceed */
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -258,11 +301,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
return ret;
}
+ /* force the next iteration to vfs_poll() */
+ req->cqe.res = 0;
+
/*
* Release all references, retry if someone tried to restart
* task_work while we were executing it.
*/
- } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
+ } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
+ IO_POLL_REF_MASK);
return IOU_POLL_NO_ACTION;
}
@@ -506,7 +553,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- int v;
INIT_HLIST_NODE(&req->hash_node);
req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
@@ -574,11 +620,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
if (ipt->owning) {
/*
- * Release ownership. If someone tried to queue a tw while it was
- * locked, kick it off for them.
+ * Try to release ownership. If we see a change of state, e.g.
+ * poll was woken up, queue up a tw; it'll deal with it.
*/
- v = atomic_dec_return(&req->poll_refs);
- if (unlikely(v & IO_POLL_REF_MASK))
+ if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
__io_poll_execute(req, 0);
}
return 0;
diff --git a/ipc/shm.c b/ipc/shm.c
index 7d86f058fb86..bd2fcc4d454e 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -275,10 +275,8 @@ static inline void shm_rmid(struct shmid_kernel *s)
}
-static int __shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct shm_file_data *sfd)
{
- struct file *file = vma->vm_file;
- struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
@@ -302,7 +300,15 @@ static int __shm_open(struct vm_area_struct *vma)
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
- int err = __shm_open(vma);
+ struct file *file = vma->vm_file;
+ struct shm_file_data *sfd = shm_file_data(file);
+ int err;
+
+ /* Always call underlying open if present */
+ if (sfd->vm_ops->open)
+ sfd->vm_ops->open(vma);
+
+ err = __shm_open(sfd);
/*
* We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted.
@@ -359,10 +365,8 @@ static bool shm_may_destroy(struct shmid_kernel *shp)
* The descriptor has already been removed from the current->mm->mmap list
* and will later be kfree()d.
*/
-static void shm_close(struct vm_area_struct *vma)
+static void __shm_close(struct shm_file_data *sfd)
{
- struct file *file = vma->vm_file;
- struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns;
@@ -388,6 +392,18 @@ done:
up_write(&shm_ids(ns).rwsem);
}
+static void shm_close(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct shm_file_data *sfd = shm_file_data(file);
+
+ /* Always call underlying close if present */
+ if (sfd->vm_ops->close)
+ sfd->vm_ops->close(vma);
+
+ __shm_close(sfd);
+}
+
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
@@ -583,13 +599,13 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
* IPC ID that was removed, and possibly even reused by another shm
* segment already. Propagate this case as an error to caller.
*/
- ret = __shm_open(vma);
+ ret = __shm_open(sfd);
if (ret)
return ret;
ret = call_mmap(sfd->file, vma);
if (ret) {
- shm_close(vma);
+ __shm_close(sfd);
return ret;
}
sfd->vm_ops = vma->vm_ops;
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 802fc15b0d73..f27fa5ba7d72 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -74,7 +74,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
gfp_flags | __GFP_NOWARN);
if (selem) {
if (value)
- memcpy(SDATA(selem)->data, value, smap->map.value_size);
+ copy_map_value(&smap->map, SDATA(selem)->data, value);
return selem;
}
diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
index 04f0a045dcaa..c19719f48ce0 100644
--- a/kernel/bpf/dispatcher.c
+++ b/kernel/bpf/dispatcher.c
@@ -4,7 +4,7 @@
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
/* The BPF dispatcher is a multiway branch code generator. The
* dispatcher is a mechanism to avoid the performance penalty of an
@@ -91,11 +91,6 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
return -ENOTSUPP;
}
-int __weak __init bpf_arch_init_dispatcher_early(void *ip)
-{
- return -ENOTSUPP;
-}
-
static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
{
s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
@@ -110,17 +105,11 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *b
static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{
- void *old, *new, *tmp;
- u32 noff;
- int err;
-
- if (!prev_num_progs) {
- old = NULL;
- noff = 0;
- } else {
- old = d->image + d->image_off;
+ void *new, *tmp;
+ u32 noff = 0;
+
+ if (prev_num_progs)
noff = d->image_off ^ (PAGE_SIZE / 2);
- }
new = d->num_progs ? d->image + noff : NULL;
tmp = d->num_progs ? d->rw_image + noff : NULL;
@@ -134,11 +123,10 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
return;
}
- err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
- if (err || !new)
- return;
+ __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);
- d->image_off = noff;
+ if (new)
+ d->image_off = noff;
}
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index b6e7f5c5b9ab..034cf87b54e9 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -100,22 +100,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems)
{
struct pcpu_freelist_head *head;
- int i, cpu, pcpu_entries;
+ unsigned int cpu, cpu_idx, i, j, n, m;
- pcpu_entries = nr_elems / num_possible_cpus() + 1;
- i = 0;
+ n = nr_elems / num_possible_cpus();
+ m = nr_elems % num_possible_cpus();
+ cpu_idx = 0;
for_each_possible_cpu(cpu) {
-again:
head = per_cpu_ptr(s->freelist, cpu);
- /* No locking required as this is not visible yet. */
- pcpu_freelist_push_node(head, buf);
- i++;
- buf += elem_size;
- if (i == nr_elems)
- break;
- if (i % pcpu_entries)
- goto again;
+ j = n + (cpu_idx < m ? 1 : 0);
+ for (i = 0; i < j; i++) {
+ /* No locking required as this is not visible yet. */
+ pcpu_freelist_push_node(head, buf);
+ buf += elem_size;
+ }
+ cpu_idx++;
}
}
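
The reworked pcpu_freelist_populate() loop above distributes nr_elems as evenly as possible: every CPU receives nr_elems / nr_cpus nodes and the first nr_elems % nr_cpus CPUs receive one extra. A quick user-space sketch of that arithmetic (illustrative, with made-up counts):

/* Sketch of the even split used above: n per CPU, first m CPUs get +1. */
#include <stdio.h>

int main(void)
{
	unsigned int nr_elems = 10, nr_cpus = 4;
	unsigned int n = nr_elems / nr_cpus;   /* 2 */
	unsigned int m = nr_elems % nr_cpus;   /* 2 */
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %u gets %u elements\n", cpu, n + (cpu < m ? 1 : 0));
	/* -> 3, 3, 2, 2: no CPU is more than one element off the average. */
	return 0;
}
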
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 225666307bba..264b3dc714cc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6745,11 +6745,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
/* Transfer references to the callee */
err = copy_reference_state(callee, caller);
if (err)
- return err;
+ goto err_out;
err = set_callee_state_cb(env, caller, callee, *insn_idx);
if (err)
- return err;
+ goto err_out;
clear_caller_saved_regs(env, caller->regs);
@@ -6766,6 +6766,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
print_verifier_state(env, callee, true);
}
return 0;
+
+err_out:
+ free_func_state(callee);
+ state->frame[state->curframe + 1] = NULL;
+ return err;
}
int map_set_for_each_callback_args(struct bpf_verifier_env *env,
@@ -6979,8 +6984,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
return -EINVAL;
}
- state->curframe--;
- caller = state->frame[state->curframe];
+ caller = state->frame[state->curframe - 1];
if (callee->in_callback_fn) {
/* enforce R0 return value range [0, 1]. */
struct tnum range = callee->callback_ret_range;
@@ -7019,7 +7023,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
}
/* clear everything in the callee */
free_func_state(callee);
- state->frame[state->curframe + 1] = NULL;
+ state->frame[state->curframe--] = NULL;
return 0;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4ec3717003d5..7f04f995c975 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2291,6 +2291,7 @@ event_sched_out(struct perf_event *event,
!event->pending_work) {
event->pending_work = 1;
dec = false;
+ WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
task_work_add(current, &event->pending_task, TWA_RESUME);
}
if (dec)
@@ -2336,6 +2337,7 @@ group_sched_out(struct perf_event *group_event,
#define DETACH_GROUP 0x01UL
#define DETACH_CHILD 0x02UL
+#define DETACH_DEAD 0x04UL
/*
* Cross CPU call to remove a performance event
@@ -2356,12 +2358,20 @@ __perf_remove_from_context(struct perf_event *event,
update_cgrp_time_from_cpuctx(cpuctx, false);
}
+ /*
+ * Ensure event_sched_out() switches to OFF, at the very least
+ * this avoids raising perf_pending_task() at this time.
+ */
+ if (flags & DETACH_DEAD)
+ event->pending_disable = 1;
event_sched_out(event, cpuctx, ctx);
if (flags & DETACH_GROUP)
perf_group_detach(event);
if (flags & DETACH_CHILD)
perf_child_detach(event);
list_del_event(event, ctx);
+ if (flags & DETACH_DEAD)
+ event->state = PERF_EVENT_STATE_DEAD;
if (!ctx->nr_events && ctx->is_active) {
if (ctx == &cpuctx->ctx)
@@ -5121,9 +5131,7 @@ int perf_event_release_kernel(struct perf_event *event)
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
- perf_remove_from_context(event, DETACH_GROUP);
- raw_spin_lock_irq(&ctx->lock);
/*
* Mark this event as STATE_DEAD, there is no external reference to it
* anymore.
@@ -5135,8 +5143,7 @@ int perf_event_release_kernel(struct perf_event *event)
* Thus this guarantees that we will in fact observe and kill _ALL_
* child events.
*/
- event->state = PERF_EVENT_STATE_DEAD;
- raw_spin_unlock_irq(&ctx->lock);
+ perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
perf_event_ctx_unlock(event, ctx);
@@ -6577,6 +6584,8 @@ static void perf_pending_task(struct callback_head *head)
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace();
+
+ put_event(event);
}
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -9030,7 +9039,7 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
PERF_RECORD_KSYMBOL_TYPE_BPF,
(u64)(unsigned long)subprog->bpf_func,
subprog->jited_len, unregister,
- prog->aux->ksym.name);
+ subprog->aux->ksym.name);
}
}
}
@@ -9273,6 +9282,19 @@ int perf_event_account_interrupt(struct perf_event *event)
return __perf_event_account_interrupt(event, 1);
}
+static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+ /*
+ * Due to interrupt latency (AKA "skid"), we may enter the
+ * kernel before taking an overflow, even if the PMU is only
+ * counting user events.
+ */
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return false;
+
+ return true;
+}
+
/*
* Generic event overflow handling, sampling.
*/
@@ -9307,15 +9329,38 @@ static int __perf_event_overflow(struct perf_event *event,
if (event->attr.sigtrap) {
/*
- * Should not be able to return to user space without processing
- * pending_sigtrap (kernel events can overflow multiple times).
+ * The desired behaviour of sigtrap vs invalid samples is a bit
+ * tricky; on the one hand, one should not lose the SIGTRAP if
+ * it is the first event; on the other hand, we should also not
+ * trigger the WARN or override the data address.
*/
- WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
+ bool valid_sample = sample_is_allowed(event, regs);
+ unsigned int pending_id = 1;
+
+ if (regs)
+ pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
if (!event->pending_sigtrap) {
- event->pending_sigtrap = 1;
+ event->pending_sigtrap = pending_id;
local_inc(&event->ctx->nr_pending);
+ } else if (event->attr.exclude_kernel && valid_sample) {
+ /*
+ * Should not be able to return to user space without
+ * consuming pending_sigtrap; with exceptions:
+ *
+ * 1. Where !exclude_kernel, events can overflow again
+ * in the kernel without returning to user space.
+ *
+ * 2. Events that can overflow again before the IRQ-
+ * work without user space progress (e.g. hrtimer).
+ * To approximate progress (with false negatives),
+ * check 32-bit hash of the current IP.
+ */
+ WARN_ON_ONCE(event->pending_sigtrap != pending_id);
}
- event->pending_addr = data->addr;
+
+ event->pending_addr = 0;
+ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+ event->pending_addr = data->addr;
irq_work_queue(&event->pending_irq);
}
diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
index cbb0bed958ab..7670a811a565 100644
--- a/kernel/gcov/clang.c
+++ b/kernel/gcov/clang.c
@@ -280,6 +280,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
for (i = 0; i < sfn_ptr->num_counters; i++)
dfn_ptr->counters[i] += sfn_ptr->counters[i];
+
+ sfn_ptr = list_next_entry(sfn_ptr, head);
}
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cd9f5a66a690..3050631e528d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1766,7 +1766,13 @@ static int __unregister_kprobe_top(struct kprobe *p)
if ((list_p != p) && (list_p->post_handler))
goto noclean;
}
- ap->post_handler = NULL;
+ /*
+ * For the kprobe-on-ftrace case, we keep the
+ * post_handler setting to identify this aggrprobe
+ * armed with kprobe_ipmodify_ops.
+ */
+ if (!kprobe_ftrace(ap))
+ ap->post_handler = NULL;
}
noclean:
/*
diff --git a/kernel/rseq.c b/kernel/rseq.c
index bda8175f8f99..d38ab944105d 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -171,12 +171,27 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
return 0;
}
+static bool rseq_warn_flags(const char *str, u32 flags)
+{
+ u32 test_flags;
+
+ if (!flags)
+ return false;
+ test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
+ if (test_flags)
+ pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
+ test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
+ if (test_flags)
+ pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
+ return true;
+}
+
static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
{
u32 flags, event_mask;
int ret;
- if (WARN_ON_ONCE(cs_flags & RSEQ_CS_NO_RESTART_FLAGS) || cs_flags)
+ if (rseq_warn_flags("rseq_cs", cs_flags))
return -EINVAL;
/* Get thread flags. */
@@ -184,7 +199,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
if (ret)
return ret;
- if (WARN_ON_ONCE(flags & RSEQ_CS_NO_RESTART_FLAGS) || flags)
+ if (rseq_warn_flags("rseq", flags))
return -EINVAL;
/*
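The new helper replaces a pair of WARN_ON_ONCE() checks: deprecated bits (RSEQ_CS_NO_RESTART_FLAGS) and unknown bits each get their own one-time warning, and either still makes the call fail with -EINVAL. A user-space sketch of the same mask-splitting pattern (flag values are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEPRECATED_FLAGS 0x7u	/* illustrative stand-in for RSEQ_CS_NO_RESTART_FLAGS */

/* Warn separately about deprecated and unknown bits; any set bit is fatal. */
static bool warn_flags(const char *what, uint32_t flags)
{
	uint32_t bad;

	if (!flags)
		return false;
	bad = flags & DEPRECATED_FLAGS;
	if (bad)
		fprintf(stderr, "deprecated flags (%u) in %s\n", (unsigned)bad, what);
	bad = flags & ~DEPRECATED_FLAGS;
	if (bad)
		fprintf(stderr, "unknown flags (%u) in %s\n", (unsigned)bad, what);
	return true;
}

int main(void)
{
	return warn_flags("rseq_cs", 0x9) ? 1 : 0;	/* one deprecated bit, one unknown bit */
}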
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb2aa2b54c7a..daff72f00385 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4200,6 +4200,40 @@ out:
return success;
}
+static bool __task_needs_rq_lock(struct task_struct *p)
+{
+ unsigned int state = READ_ONCE(p->__state);
+
+ /*
+ * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+ * the task is blocked. Make sure to check @state since ttwu() can drop
+ * locks at the end, see ttwu_queue_wakelist().
+ */
+ if (state == TASK_RUNNING || state == TASK_WAKING)
+ return true;
+
+ /*
+ * Ensure we load p->on_rq after p->__state, otherwise it would be
+ * possible to, falsely, observe p->on_rq == 0.
+ *
+ * See try_to_wake_up() for a longer comment.
+ */
+ smp_rmb();
+ if (p->on_rq)
+ return true;
+
+#ifdef CONFIG_SMP
+ /*
+ * Ensure the task has finished __schedule() and will not be referenced
+ * anymore. Again, see try_to_wake_up() for a longer comment.
+ */
+ smp_rmb();
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
+#endif
+
+ return false;
+}
+
/**
* task_call_func - Invoke a function on task in fixed state
* @p: Process for which the function is to be invoked, can be @current.
@@ -4217,28 +4251,12 @@ out:
int task_call_func(struct task_struct *p, task_call_f func, void *arg)
{
struct rq *rq = NULL;
- unsigned int state;
struct rq_flags rf;
int ret;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
- state = READ_ONCE(p->__state);
-
- /*
- * Ensure we load p->on_rq after p->__state, otherwise it would be
- * possible to, falsely, observe p->on_rq == 0.
- *
- * See try_to_wake_up() for a longer comment.
- */
- smp_rmb();
-
- /*
- * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
- * the task is blocked. Make sure to check @state since ttwu() can drop
- * locks at the end, see ttwu_queue_wakelist().
- */
- if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+ if (__task_needs_rq_lock(p))
rq = __task_rq_lock(p, &rf);
/*
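The refactor moves the "do we need the rq lock?" decision into a predicate so the ordering requirements live in one place: read p->__state first, then p->on_rq, and on SMP wait for p->on_cpu to drop with acquire semantics. A heavily simplified C11 sketch of the load-ordering part only, where the acquire load plays a role similar to the kernel's smp_rmb() between the two loads (this is not the scheduler's code):

#include <stdatomic.h>
#include <stdbool.h>

struct task_like {
	atomic_int state;	/* stand-in for p->__state */
	atomic_int on_rq;	/* stand-in for p->on_rq  */
};

/* Decide whether the heavier lock is needed. The acquire load of ->state
 * keeps the later ->on_rq load from being observed "too early". */
static bool needs_heavy_lock(struct task_like *t, int running, int waking)
{
	int s = atomic_load_explicit(&t->state, memory_order_acquire);

	if (s == running || s == waking)
		return true;
	return atomic_load_explicit(&t->on_rq, memory_order_relaxed) != 0;
}

int main(void)
{
	struct task_like t;

	atomic_init(&t.state, 0);
	atomic_init(&t.on_rq, 1);
	return needs_heavy_lock(&t, 1, 2) ? 0 : 1;
}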
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 9161d1136d01..1207c78f85c1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -25,9 +25,6 @@ struct sugov_policy {
unsigned int next_freq;
unsigned int cached_raw_freq;
- /* max CPU capacity, which is equal for all CPUs in freq. domain */
- unsigned long max;
-
/* The next fields are only needed if fast switch cannot be used: */
struct irq_work irq_work;
struct kthread_work work;
@@ -51,6 +48,7 @@ struct sugov_cpu {
unsigned long util;
unsigned long bw_dl;
+ unsigned long max;
/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
@@ -160,6 +158,7 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
+ sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
FREQUENCY_UTIL, NULL);
@@ -254,7 +253,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
*/
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long boost;
/* No boost currently required */
@@ -282,8 +280,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
* sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare.
*/
- boost = sg_cpu->iowait_boost * sg_policy->max;
- boost >>= SCHED_CAPACITY_SHIFT;
+ boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
if (sg_cpu->util < boost)
sg_cpu->util = boost;
@@ -340,7 +337,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
if (!sugov_update_single_common(sg_cpu, time, flags))
return;
- next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max);
+ next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
/*
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
@@ -376,7 +373,6 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long prev_util = sg_cpu->util;
/*
@@ -403,8 +399,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
- map_util_perf(sg_cpu->util),
- sg_policy->max);
+ map_util_perf(sg_cpu->util), sg_cpu->max);
sg_cpu->sg_policy->last_freq_update_time = time;
}
@@ -413,19 +408,25 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- unsigned long util = 0;
+ unsigned long util = 0, max = 1;
unsigned int j;
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
+ unsigned long j_util, j_max;
sugov_get_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time);
+ j_util = j_sg_cpu->util;
+ j_max = j_sg_cpu->max;
- util = max(j_sg_cpu->util, util);
+ if (j_util * max > j_max * util) {
+ util = j_util;
+ max = j_max;
+ }
}
- return get_next_freq(sg_policy, util, sg_policy->max);
+ return get_next_freq(sg_policy, util, max);
}
static void
@@ -751,7 +752,7 @@ static int sugov_start(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
- unsigned int cpu = cpumask_first(policy->cpus);
+ unsigned int cpu;
sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
sg_policy->last_freq_update_time = 0;
@@ -759,7 +760,6 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->work_in_progress = false;
sg_policy->limits_changed = false;
sg_policy->cached_raw_freq = 0;
- sg_policy->max = arch_scale_cpu_capacity(cpu);
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
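With max capacity tracked per CPU again, the shared-policy path picks the CPU with the largest util/max ratio without dividing: j_util * max > j_max * util is the cross-multiplied form of j_util/j_max > util/max. A standalone sketch of that selection loop (capacities and utilization values are made up):

#include <stdio.h>

struct cpu_sample { unsigned long util, max; };

int main(void)
{
	/* e.g. a big.LITTLE domain: the same util is more pressure on a small core */
	struct cpu_sample cpus[] = { { 300, 1024 }, { 300, 512 }, { 600, 1024 } };
	unsigned long util = 0, max = 1;	/* max starts at 1 so 0*x vs 0*y cannot tie */

	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		/* cross-multiplied comparison of util/max ratios, no division */
		if (cpus[i].util * max > cpus[i].max * util) {
			util = cpus[i].util;
			max = cpus[i].max;
		}
	}
	printf("selected util=%lu max=%lu (ratio %.2f)\n", util, max, (double)util / max);
	return 0;
}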
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dc023641bf1..33236241f236 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1289,6 +1289,7 @@ static int ftrace_add_mod(struct trace_array *tr,
if (!ftrace_mod)
return -ENOMEM;
+ INIT_LIST_HEAD(&ftrace_mod->list);
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
ftrace_mod->enable = enable;
@@ -3190,7 +3191,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
/* if we can't allocate this size, try something smaller */
if (!order)
return -ENOMEM;
- order >>= 1;
+ order--;
goto again;
}
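Since order is a power-of-two exponent (the request is 1 << order pages), retrying "something smaller" should decrement it; right-shifting the exponent skips sizes entirely (6 -> 3 -> 1 instead of 6, 5, 4, ...). A trivial illustration of the difference (not kernel code):

#include <stdio.h>

int main(void)
{
	/* pages requested at each retry when stepping the exponent down by one */
	for (int order = 6; order > 0; order--)
		printf("order %d -> %d pages\n", order, 1 << order);

	/* the old "order >>= 1" retry walks 6 -> 3 -> 1 -> 0, skipping sizes */
	for (int order = 6; order > 0; order >>= 1)
		printf("shifted retry at order %d (%d pages)\n", order, 1 << order);
	return 0;
}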
@@ -7391,7 +7392,7 @@ void __init ftrace_init(void)
}
pr_info("ftrace: allocating %ld entries in %ld pages\n",
- count, count / ENTRIES_PER_PAGE + 1);
+ count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
ret = ftrace_process_locs(NULL,
__start_mcount_loc,
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
index d81f7c51025c..c736487fc0e4 100644
--- a/kernel/trace/kprobe_event_gen_test.c
+++ b/kernel/trace/kprobe_event_gen_test.c
@@ -73,6 +73,10 @@ static struct trace_event_file *gen_kretprobe_test;
#define KPROBE_GEN_TEST_ARG3 NULL
#endif
+static bool trace_event_file_is_valid(struct trace_event_file *input)
+{
+ return input && !IS_ERR(input);
+}
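trace_get_event_file() can hand back an ERR_PTR()-encoded errno as well as a pointer that was never assigned, so a plain NULL check is not enough; the helper treats "NULL or IS_ERR()" as invalid before the tests touch the file. A user-space sketch of the ERR_PTR() convention (a hand-rolled illustration, not the kernel macros):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a small negative errno into a pointer, as the kernel's ERR_PTR() does. */
static void *err_ptr(long err)      { return (void *)err; }
static bool  is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)p; }

static bool file_is_valid(const void *f) { return f && !is_err(f); }

int main(void)
{
	void *ok = &ok, *failed = err_ptr(-ENODEV);

	printf("%d %d %ld\n", file_is_valid(ok), file_is_valid(failed), ptr_err(failed));
	return 0;
}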
/*
* Test to make sure we can create a kprobe event, then add more
@@ -139,6 +143,8 @@ static int __init test_gen_kprobe_cmd(void)
kfree(buf);
return ret;
delete:
+ if (trace_event_file_is_valid(gen_kprobe_test))
+ gen_kprobe_test = NULL;
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kprobe_test");
goto out;
@@ -202,6 +208,8 @@ static int __init test_gen_kretprobe_cmd(void)
kfree(buf);
return ret;
delete:
+ if (trace_event_file_is_valid(gen_kretprobe_test))
+ gen_kretprobe_test = NULL;
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kretprobe_test");
goto out;
@@ -217,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void)
ret = test_gen_kretprobe_cmd();
if (ret) {
- WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
- "kprobes",
- "gen_kretprobe_test", false));
- trace_put_event_file(gen_kretprobe_test);
+ if (trace_event_file_is_valid(gen_kretprobe_test)) {
+ WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+ trace_put_event_file(gen_kretprobe_test);
+ }
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
}
@@ -229,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void)
static void __exit kprobe_event_gen_test_exit(void)
{
- /* Disable the event or you can't remove it */
- WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
- "kprobes",
- "gen_kprobe_test", false));
+ if (trace_event_file_is_valid(gen_kprobe_test)) {
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes",
+ "gen_kprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kprobe_test);
+ }
- /* Now give the file and instance back */
- trace_put_event_file(gen_kprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kprobe_test"));
- /* Disable the event or you can't remove it */
- WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
- "kprobes",
- "gen_kretprobe_test", false));
+ if (trace_event_file_is_valid(gen_kretprobe_test)) {
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kretprobe_test);
+ }
- /* Now give the file and instance back */
- trace_put_event_file(gen_kretprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
index c69d82273ce7..32c3dfdb4d6a 100644
--- a/kernel/trace/rethook.c
+++ b/kernel/trace/rethook.c
@@ -83,8 +83,10 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
{
struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
- if (!rh || !handler)
+ if (!rh || !handler) {
+ kfree(rh);
return NULL;
+ }
rh->data = data;
rh->handler = handler;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9712083832f4..b21bf14bae9b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -519,6 +519,7 @@ struct ring_buffer_per_cpu {
local_t committing;
local_t commits;
local_t pages_touched;
+ local_t pages_lost;
local_t pages_read;
long last_pages_touch;
size_t shortest_full;
@@ -894,10 +895,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
size_t read;
+ size_t lost;
size_t cnt;
read = local_read(&buffer->buffers[cpu]->pages_read);
+ lost = local_read(&buffer->buffers[cpu]->pages_lost);
cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+ if (WARN_ON_ONCE(cnt < lost))
+ return 0;
+
+ cnt -= lost;
+
/* The reader can read an empty page, but not more than that */
if (cnt < read) {
WARN_ON_ONCE(read > cnt + 1);
@@ -907,6 +916,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
return cnt - read;
}
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ size_t nr_pages;
+ size_t dirty;
+
+ nr_pages = cpu_buffer->nr_pages;
+ if (!nr_pages || !full)
+ return true;
+
+ dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+
+ return (dirty * 100) > (full * nr_pages);
+}
+
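full_hit() is the single place that answers "is at least full percent of the buffer dirty?", where dirty is pages_touched minus pages_lost (and bounded by pages_read); dirty * 100 > full * nr_pages is the integer form of dirty/nr_pages > full/100. A standalone arithmetic check (numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool full_hit_like(size_t nr_pages, size_t dirty, int full)
{
	if (!nr_pages || !full)
		return true;	/* no threshold requested: always ready */
	return dirty * 100 > (size_t)full * nr_pages;
}

int main(void)
{
	/* 128-page buffer, waiter asked for 50%: 64 dirty pages is not enough, 65 is */
	printf("%d %d\n", full_hit_like(128, 64, 50), full_hit_like(128, 65, 50));
	return 0;
}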
/*
* rb_wake_up_waiters - wake up tasks waiting for ring buffer input
*
@@ -1046,22 +1070,20 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
!ring_buffer_empty_cpu(buffer, cpu)) {
unsigned long flags;
bool pagebusy;
- size_t nr_pages;
- size_t dirty;
+ bool done;
if (!full)
break;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+ done = !pagebusy && full_hit(buffer, cpu, full);
+
if (!cpu_buffer->shortest_full ||
cpu_buffer->shortest_full > full)
cpu_buffer->shortest_full = full;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (!pagebusy &&
- (!nr_pages || (dirty * 100) > full * nr_pages))
+ if (done)
break;
}
@@ -1087,6 +1109,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* @cpu: the cpu buffer to wait on
* @filp: the file descriptor
* @poll_table: The poll descriptor
+ * @full: wait until this percentage of pages is available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -1096,14 +1119,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table)
+ struct file *filp, poll_table *poll_table, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
- if (cpu == RING_BUFFER_ALL_CPUS)
+ if (cpu == RING_BUFFER_ALL_CPUS) {
work = &buffer->irq_work;
- else {
+ full = 0;
+ } else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -EINVAL;
@@ -1111,8 +1135,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
work = &cpu_buffer->irq_work;
}
- poll_wait(filp, &work->waiters, poll_table);
- work->waiters_pending = true;
+ if (full) {
+ poll_wait(filp, &work->full_waiters, poll_table);
+ work->full_waiters_pending = true;
+ } else {
+ poll_wait(filp, &work->waiters, poll_table);
+ work->waiters_pending = true;
+ }
+
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
@@ -1128,6 +1158,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
*/
smp_mb();
+ if (full)
+ return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return EPOLLIN | EPOLLRDNORM;
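For user space, the new argument means a poll() on a per-CPU trace_pipe_raw file only reports POLLIN once the configured buffer_percent of pages are dirty, rather than on the first event. A minimal illustration, assuming tracefs is mounted at the usual /sys/kernel/tracing and buffer_percent has been set:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumes tracefs at /sys/kernel/tracing and a prior
	 * "echo 50 > /sys/kernel/tracing/buffer_percent" */
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;
	/* With this patch, POLLIN is only signalled once the buffer is at least
	 * buffer_percent full, not as soon as a single event is written. */
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
		char buf[4096];
		ssize_t n = read(fd, buf, sizeof(buf));

		printf("read %zd bytes of raw ring-buffer data\n", n);
	}
	close(fd);
	return 0;
}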
@@ -1769,9 +1802,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
free_buffer_page(cpu_buffer->reader_page);
- rb_head_page_deactivate(cpu_buffer);
-
if (head) {
+ rb_head_page_deactivate(cpu_buffer);
+
list_for_each_entry_safe(bpage, tmp, head, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
@@ -2007,6 +2040,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
*/
local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
}
/*
@@ -2491,6 +2525,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_add(entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
/*
* The entries will be zeroed out when we move the
@@ -3155,10 +3190,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
static __always_inline void
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
- size_t nr_pages;
- size_t dirty;
- size_t full;
-
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies it's own memory barriers */
@@ -3182,10 +3213,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
- full = cpu_buffer->shortest_full;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
- if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+ if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
return;
cpu_buffer->irq_work.wakeup_full = true;
@@ -5248,6 +5276,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
local_set(&cpu_buffer->pages_touched, 0);
+ local_set(&cpu_buffer->pages_lost, 0);
local_set(&cpu_buffer->pages_read, 0);
cpu_buffer->last_pages_touch = 0;
cpu_buffer->shortest_full = 0;
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index 0b15e975d2c2..8d77526892f4 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
/* Now generate a gen_synth_test event */
ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+ kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("gen_synth_test");
- free:
- kfree(buf);
-
- goto out;
+ goto free;
}
/*
@@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void)
/* Now trace an empty_synth_test event */
ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+ kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("empty_synth_test");
- free:
- kfree(buf);
-
- goto out;
+ goto free;
}
static struct synth_field_desc create_synth_test_fields[] = {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 47a44b055a1d..5cfc95a52bc3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2180,10 +2180,12 @@ void tracing_reset_online_cpus(struct array_buffer *buf)
}
/* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
{
struct trace_array *tr;
+ lockdep_assert_held(&trace_types_lock);
+
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->clear_trace)
continue;
@@ -2195,6 +2197,13 @@ void tracing_reset_all_online_cpus(void)
}
}
+void tracing_reset_all_online_cpus(void)
+{
+ mutex_lock(&trace_types_lock);
+ tracing_reset_all_online_cpus_unlocked();
+ mutex_unlock(&trace_types_lock);
+}
+
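This is the usual locked/unlocked split: the _unlocked variant asserts (via lockdep) that trace_types_lock is already held and does the work, while the wrapper takes the mutex around it, so callers that already hold the lock stop deadlocking. A generic sketch of the pattern with POSIX threads (names are illustrative):

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Caller must hold state_lock; the kernel variant enforces this with
 * lockdep_assert_held(&trace_types_lock). */
static void reset_state_unlocked(void)
{
	state = 0;
}

/* Convenience wrapper for callers that do not already hold the lock. */
static void reset_state(void)
{
	pthread_mutex_lock(&state_lock);
	reset_state_unlocked();
	pthread_mutex_unlock(&state_lock);
}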
/*
* The tgid_map array maps from pid to tgid; i.e. the value stored at index i
* is the tgid last observed corresponding to pid=i.
@@ -6657,6 +6666,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
+ kfree(iter->fmt);
mutex_destroy(&iter->mutex);
kfree(iter);
@@ -6681,7 +6691,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
return EPOLLIN | EPOLLRDNORM;
else
return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
- filp, poll_table);
+ filp, poll_table, iter->tr->buffer_percent);
}
static __poll_t
@@ -7802,6 +7812,7 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
int len)
{
struct tracing_log_err *err;
+ char *cmd;
if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
err = alloc_tracing_log_err(len);
@@ -7810,12 +7821,12 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
return err;
}
-
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
kfree(err->cmd);
- err->cmd = kzalloc(len, GFP_KERNEL);
- if (!err->cmd)
- return ERR_PTR(-ENOMEM);
+ err->cmd = cmd;
list_del(&err->list);
return err;
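The reordering follows the "allocate the replacement before freeing the old one" rule: if the allocation fails, the recycled log entry keeps its previous cmd buffer instead of being left with a dangling pointer. A generic user-space sketch of that pattern (illustrative):

#include <stdlib.h>

struct log_err {
	char *cmd;
};

/* Replace e->cmd with a fresh zeroed buffer of new_len bytes.
 * On allocation failure the old buffer is left untouched. */
static int replace_cmd(struct log_err *e, size_t new_len)
{
	char *cmd = calloc(1, new_len);

	if (!cmd)
		return -1;	/* e->cmd still valid, nothing freed yet */
	free(e->cmd);
	e->cmd = cmd;
	return 0;
}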
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54ee5711c729..d42e24507152 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -580,6 +580,7 @@ int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
+void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 154996684fb5..4376887e0d8a 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -118,6 +118,7 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
if (ret)
break;
}
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
out:
argv_free(argv);
@@ -214,6 +215,7 @@ int dyn_events_release_all(struct dyn_event_operations *type)
break;
}
out:
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
return ret;
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 5dd0617e5df6..352b65e2b910 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -52,6 +52,7 @@ static void trace_event_probe_cleanup(struct trace_eprobe *ep)
kfree(ep->event_system);
if (ep->event)
trace_event_put_ref(ep->event);
+ kfree(ep->filter_str);
kfree(ep);
}
@@ -563,6 +564,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data,
{
struct eprobe_data *edata = data->private_data;
+ if (unlikely(!rec))
+ return;
+
__eprobe_trace_func(edata, rec);
}
@@ -642,7 +646,7 @@ new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
INIT_LIST_HEAD(&trigger->list);
if (ep->filter_str) {
- ret = create_event_filter(file->tr, file->event_call,
+ ret = create_event_filter(file->tr, ep->event,
ep->filter_str, false, &filter);
if (ret)
goto error;
@@ -900,7 +904,7 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
{
- struct event_filter *dummy;
+ struct event_filter *dummy = NULL;
int i, ret, len = 0;
char *p;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 0356cae0cf74..f71ea6e79b3c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2880,7 +2880,10 @@ static int probe_remove_event_call(struct trace_event_call *call)
* TRACE_REG_UNREGISTER.
*/
if (file->flags & EVENT_FILE_FL_ENABLED)
- return -EBUSY;
+ goto busy;
+
+ if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+ tr->clear_trace = true;
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -2893,6 +2896,12 @@ static int probe_remove_event_call(struct trace_event_call *call)
__trace_remove_event_call(call);
return 0;
+ busy:
+ /* No need to clear the trace now */
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ tr->clear_trace = false;
+ }
+ return -EBUSY;
}
/* Remove an event_call */
@@ -2972,7 +2981,7 @@ static void trace_module_remove_events(struct module *mod)
* over from this module may be passed to the new module events and
* unexpected results may occur.
*/
- tracing_reset_all_online_cpus();
+ tracing_reset_all_online_cpus_unlocked();
}
static int trace_module_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 48465f7e97b4..1c82478e8dff 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -983,7 +983,7 @@ static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
* A trigger can define one or more variables. If any one of them is
* currently referenced by any other trigger, this function will
* determine that.
-
+ *
* Typically used to determine whether or not a trigger can be removed
* - if there are any references to a trigger's variables, it cannot.
*
@@ -3226,7 +3226,7 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
* events. However, for convenience, users are allowed to directly
* specify an event field in an action, which will be automatically
* converted into a variable on their behalf.
-
+ *
* This function creates a field variable with the name var_name on
* the hist trigger currently being defined on the target event. If
* subsys_name and event_name are specified, this function simply
@@ -5143,6 +5143,9 @@ static void event_hist_trigger(struct event_trigger_data *data,
void *key = NULL;
unsigned int i;
+ if (unlikely(!rbe))
+ return;
+
memset(compound_key, 0, hist_data->key_size);
for_each_hist_key_field(i, hist_data) {
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index e310052dc83c..c3b582d19b62 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -828,10 +828,9 @@ static int register_synth_event(struct synth_event *event)
}
ret = set_synth_event_print_fmt(call);
- if (ret < 0) {
+ /* unregister_trace_event() will be called inside */
+ if (ret < 0)
trace_remove_event_call(call);
- goto err;
- }
out:
return ret;
err:
@@ -1426,7 +1425,6 @@ int synth_event_delete(const char *event_name)
mutex_unlock(&event_mutex);
if (mod) {
- mutex_lock(&trace_types_lock);
/*
* It is safest to reset the ring buffer if the module
* being unloaded registered any events that were
@@ -1438,7 +1436,6 @@ int synth_event_delete(const char *event_name)
* occur.
*/
tracing_reset_all_online_cpus();
- mutex_unlock(&trace_types_lock);
}
return ret;
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index ae78c2d53c8a..539b08ae7020 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -1100,8 +1100,10 @@ static int user_event_create(const char *raw_command)
group = current_user_event_group();
- if (!group)
+ if (!group) {
+ kfree(name);
return -ENOENT;
+ }
mutex_lock(&group->reg_mutex);
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 78d536d3ff3d..4300c5dc4e5d 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -917,7 +917,7 @@ void osnoise_trace_irq_entry(int id)
void osnoise_trace_irq_exit(int id, const char *desc)
{
struct osnoise_variables *osn_var = this_cpu_osn_var();
- int duration;
+ s64 duration;
if (!osn_var->sampling)
return;
@@ -1048,7 +1048,7 @@ static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
{
struct osnoise_variables *osn_var = this_cpu_osn_var();
- int duration;
+ s64 duration;
if (!osn_var->sampling)
return;
@@ -1144,7 +1144,7 @@ thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
static void
thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
{
- int duration;
+ s64 duration;
if (!osn_var->sampling)
return;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b69e207012c9..942ddbdace4a 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -201,8 +201,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
-extern char *__bad_type_size(void);
-
#define SYSCALL_FIELD(_type, _name) { \
.type = #_type, .name = #_name, \
.size = sizeof(_type), .align = __alignof__(_type), \
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c3c0b077ade3..3638b3424be5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -399,6 +399,7 @@ config FRAME_WARN
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
default 1024 if !64BIT
default 2048 if 64BIT
help
@@ -1874,8 +1875,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
config FUNCTION_ERROR_INJECTION
- def_bool y
+ bool "Fault-injections of functions"
depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful for testing error paths of code.
+
+ If unsure, say N.
config FAULT_INJECTION
bool "Fault-injection framework"
@@ -2107,6 +2114,7 @@ config KPROBES_SANITY_TEST
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
+ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 96e092de5b72..adb2f9355ee6 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
- if (attr->no_warn)
- return;
-
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
@@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
* http://www.nongnu.org/failmalloc/
*/
-bool should_fail(struct fault_attr *attr, ssize_t size)
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
@@ -146,13 +143,19 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
fail:
- fail_dump(attr);
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return should_fail_ex(attr, size, 0);
+}
EXPORT_SYMBOL_GPL(should_fail);
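should_fail() is kept as a thin wrapper around the new should_fail_ex(), so existing callers are untouched while failslab and fail_page_alloc can pass FAULT_NOWARN per call instead of flipping a shared attribute. A generic sketch of the "_ex() plus wrapper" extension (names and the failure rule are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FAIL_NOWARN 0x1	/* illustrative flag, mirroring FAULT_NOWARN */

static bool should_fail_ex_like(long size, int flags)
{
	bool fail = (size > 4096);	/* stand-in for the real probability logic */

	if (fail && !(flags & FAIL_NOWARN))
		fprintf(stderr, "forcing a failure (size=%ld)\n", size);
	return fail;
}

/* Original entry point kept so existing callers stay source-compatible. */
static bool should_fail_like(long size)
{
	return should_fail_ex_like(size, 0);
}

int main(void)
{
	printf("%d %d\n", should_fail_like(8192), should_fail_ex_like(8192, FAIL_NOWARN));
	return 0;
}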
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index c415a685d61b..e814061d6aa0 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set)
endif
quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
+ cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \
then (echo >&2 "$@: dynamic relocations are not supported"; \
rm -f $@; /bin/false); fi
diff --git a/mm/compaction.c b/mm/compaction.c
index c51f7f545afe..1f6da31dd9a5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -985,28 +985,28 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
/*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ if (unlikely(!get_page_unless_zero(page)))
+ goto isolate_fail;
+
+ /*
* Migration will fail if an anonymous page is pinned in memory,
* so avoid taking lru_lock and isolating it unnecessarily in an
* admittedly racy check.
*/
mapping = page_mapping(page);
- if (!mapping && page_count(page) > page_mapcount(page))
- goto isolate_fail;
+ if (!mapping && (page_count(page) - 1) > total_mapcount(page))
+ goto isolate_fail_put;
/*
* Only allow to migrate anonymous pages in GFP_NOFS context
* because those do not depend on fs locks.
*/
if (!(cc->gfp_mask & __GFP_FS) && mapping)
- goto isolate_fail;
-
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- if (unlikely(!get_page_unless_zero(page)))
- goto isolate_fail;
+ goto isolate_fail_put;
/* Only take pages on LRU: a check now makes later tests safe */
if (!PageLRU(page))
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 9f1219a67e3f..07e5f1bdf025 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -2283,12 +2283,54 @@ static struct damos *damon_sysfs_mk_scheme(
&wmarks);
}
+static void damon_sysfs_update_scheme(struct damos *scheme,
+ struct damon_sysfs_scheme *sysfs_scheme)
+{
+ struct damon_sysfs_access_pattern *access_pattern =
+ sysfs_scheme->access_pattern;
+ struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
+ struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
+ struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
+
+ scheme->pattern.min_sz_region = access_pattern->sz->min;
+ scheme->pattern.max_sz_region = access_pattern->sz->max;
+ scheme->pattern.min_nr_accesses = access_pattern->nr_accesses->min;
+ scheme->pattern.max_nr_accesses = access_pattern->nr_accesses->max;
+ scheme->pattern.min_age_region = access_pattern->age->min;
+ scheme->pattern.max_age_region = access_pattern->age->max;
+
+ scheme->action = sysfs_scheme->action;
+
+ scheme->quota.ms = sysfs_quotas->ms;
+ scheme->quota.sz = sysfs_quotas->sz;
+ scheme->quota.reset_interval = sysfs_quotas->reset_interval_ms;
+ scheme->quota.weight_sz = sysfs_weights->sz;
+ scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
+ scheme->quota.weight_age = sysfs_weights->age;
+
+ scheme->wmarks.metric = sysfs_wmarks->metric;
+ scheme->wmarks.interval = sysfs_wmarks->interval_us;
+ scheme->wmarks.high = sysfs_wmarks->high;
+ scheme->wmarks.mid = sysfs_wmarks->mid;
+ scheme->wmarks.low = sysfs_wmarks->low;
+}
+
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
struct damon_sysfs_schemes *sysfs_schemes)
{
- int i;
+ struct damos *scheme, *next;
+ int i = 0;
- for (i = 0; i < sysfs_schemes->nr; i++) {
+ damon_for_each_scheme_safe(scheme, next, ctx) {
+ if (i < sysfs_schemes->nr)
+ damon_sysfs_update_scheme(scheme,
+ sysfs_schemes->schemes_arr[i]);
+ else
+ damon_destroy_scheme(scheme);
+ i++;
+ }
+
+ for (; i < sysfs_schemes->nr; i++) {
struct damos *scheme, *next;
scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
@@ -2339,6 +2381,10 @@ static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
damon_for_each_scheme(scheme, ctx) {
struct damon_sysfs_stats *sysfs_stats;
+ /* user could have removed the scheme sysfs dir */
+ if (schemes_idx >= sysfs_schemes->nr)
+ break;
+
sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
sysfs_stats->nr_tried = scheme->stat.nr_tried;
sysfs_stats->sz_tried = scheme->stat.sz_tried;
diff --git a/mm/failslab.c b/mm/failslab.c
index 58df9789f1d2..ffc420c0e767 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -16,6 +16,8 @@ static struct {
bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
+ int flags = 0;
+
/* No fault-injection for bootstrap cache */
if (unlikely(s == kmem_cache))
return false;
@@ -30,10 +32,16 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
return false;
+ /*
+ * Some callers pass __GFP_NOWARN to suppress any output
+ * (not just the warning) and thereby avoid deadlocks.
+ * See commit 6b9dbedbe349 for details.
+ */
if (gfpflags & __GFP_NOWARN)
- failslab.attr.no_warn = true;
+ flags |= FAULT_NOWARN;
- return should_fail(&failslab.attr, s->object_size);
+ return should_fail_ex(&failslab.attr, s->object_size, flags);
}
static int __init setup_failslab(char *str)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e48f8ef45b17..e36ca75311a5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1800,6 +1800,7 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
+ __ClearPageReserved(page);
__SetPageHead(page);
for (i = 0; i < nr_pages; i++) {
p = nth_page(page, i);
@@ -1816,7 +1817,8 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
* on the head page when they need know if put_page() is needed
* after get_user_pages().
*/
- __ClearPageReserved(p);
+ if (i != 0) /* head page cleared above */
+ __ClearPageReserved(p);
/*
* Subtle and very unlikely
*
@@ -5204,17 +5206,22 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
- /*
- * Unlock and free the vma lock before releasing i_mmap_rwsem. When
- * the vma_lock is freed, this makes the vma ineligible for pmd
- * sharing. And, i_mmap_rwsem is required to set up pmd sharing.
- * This is important as page tables for this unmapped range will
- * be asynchrously deleted. If the page tables are shared, there
- * will be issues when accessed by someone else.
- */
- __hugetlb_vma_unlock_write_free(vma);
-
- i_mmap_unlock_write(vma->vm_file->f_mapping);
+ if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
+ /*
+ * Unlock and free the vma lock before releasing i_mmap_rwsem.
+ * When the vma_lock is freed, this makes the vma ineligible
+ * for pmd sharing. And, i_mmap_rwsem is required to set up
+ * pmd sharing. This is important as page tables for this
+ * unmapped range will be asynchronously deleted. If the page
+ * tables are shared, there will be issues when accessed by
+ * someone else.
+ */
+ __hugetlb_vma_unlock_write_free(vma);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ } else {
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ hugetlb_vma_unlock_write(vma);
+ }
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 7e496856c2eb..46ecea18c4ca 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -75,18 +75,23 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
+ str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
!strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
/*
- * In case of tail calls from any of the below
- * to any of the above.
+ * In case of tail calls from any of the below to any of
+ * the above, optimized by the compiler such that the
+ * stack trace would omit the initial entry point below.
*/
fallback = skipnr + 1;
}
- /* Also the *_bulk() variants by only checking prefixes. */
+ /*
+ * The below list should only include the initial entry points
+ * into the slab allocators. Includes the *_bulk() variants by
+ * checking prefixes.
+ */
if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
- str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
goto found;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4734315f7940..3703a56571c1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -97,8 +97,8 @@ struct collapse_control {
/* Num pages scanned per node */
u32 node_load[MAX_NUMNODES];
- /* Last target selected in hpage_collapse_find_target_node() */
- int last_target_node;
+ /* nodemask for allocation fallback */
+ nodemask_t alloc_nmask;
};
/**
@@ -734,7 +734,6 @@ static void khugepaged_alloc_sleep(void)
struct collapse_control khugepaged_collapse_control = {
.is_khugepaged = true,
- .last_target_node = NUMA_NO_NODE,
};
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
@@ -783,16 +782,11 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
target_node = nid;
}
- /* do some balance if several nodes have the same hit record */
- if (target_node <= cc->last_target_node)
- for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
- nid++)
- if (max_value == cc->node_load[nid]) {
- target_node = nid;
- break;
- }
+ for_each_online_node(nid) {
+ if (max_value == cc->node_load[nid])
+ node_set(nid, cc->alloc_nmask);
+ }
- cc->last_target_node = target_node;
return target_node;
}
#else
@@ -802,9 +796,10 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
}
#endif
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+ nodemask_t *nmask)
{
- *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
+ *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
return false;
@@ -955,12 +950,11 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
struct collapse_control *cc)
{
- /* Only allocate from the target node */
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
- GFP_TRANSHUGE) | __GFP_THISNODE;
+ GFP_TRANSHUGE);
int node = hpage_collapse_find_target_node(cc);
- if (!hpage_collapse_alloc_page(hpage, gfp, node))
+ if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
return SCAN_ALLOC_HUGE_PAGE_FAIL;
if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
return SCAN_CGROUP_CHARGE_FAIL;
@@ -1057,6 +1051,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
_pmd = pmdp_collapse_flush(vma, address, pmd);
spin_unlock(pmd_ptl);
mmu_notifier_invalidate_range_end(&range);
+ tlb_remove_table_sync_one();
spin_lock(pte_ptl);
result = __collapse_huge_page_isolate(vma, address, pte, cc,
@@ -1144,6 +1139,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
goto out;
memset(cc->node_load, 0, sizeof(cc->node_load));
+ nodes_clear(cc->alloc_nmask);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -1384,16 +1380,43 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
return SCAN_SUCCEED;
}
+/*
+ * A note about locking:
+ * Trying to take the page table spinlocks would be useless here because those
+ * are only used to synchronize:
+ *
+ * - modifying terminal entries (ones that point to a data page, not to another
+ * page table)
+ * - installing *new* non-terminal entries
+ *
+ * Instead, we need roughly the same kind of protection as free_pgtables() or
+ * mm_take_all_locks() (but only for a single VMA):
+ * The mmap lock together with this VMA's rmap locks covers all paths towards
+ * the page table entries we're messing with here, except for hardware page
+ * table walks and lockless_pages_from_mm().
+ */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- spinlock_t *ptl;
pmd_t pmd;
+ struct mmu_notifier_range range;
mmap_assert_write_locked(mm);
- ptl = pmd_lock(vma->vm_mm, pmdp);
+ if (vma->vm_file)
+ lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
+ /*
+ * All anon_vmas attached to the VMA have the same root and are
+ * therefore locked by the same lock.
+ */
+ if (vma->anon_vma)
+ lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
+ addr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
pmd = pmdp_collapse_flush(vma, addr, pmdp);
- spin_unlock(ptl);
+ tlb_remove_table_sync_one();
+ mmu_notifier_invalidate_range_end(&range);
mm_dec_nr_ptes(mm);
page_table_check_pte_clear_range(mm, addr, pmd);
pte_free(mm, pmd_pgtable(pmd));
@@ -1444,6 +1467,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
return SCAN_VMA_CHECK;
+ /*
+ * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
+ * that got written to. Without this, we'd have to also lock the
+ * anon_vma if one exists.
+ */
+ if (vma->anon_vma)
+ return SCAN_VMA_CHECK;
+
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
if (userfaultfd_wp(vma))
return SCAN_PTE_UFFD_WP;
@@ -1477,6 +1508,20 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
goto drop_hpage;
}
+ /*
+ * We need to lock the mapping so that from here on, only GUP-fast and
+ * hardware page walks can access the parts of the page tables that
+ * we're operating on.
+ * See collapse_and_free_pmd().
+ */
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+
+ /*
+ * This spinlock should be unnecessary: Nobody else should be accessing
+ * the page tables under spinlock protection here, only
+ * lockless_pages_from_mm() and the hardware page walker can access page
+ * tables while all the high-level locks are held in write mode.
+ */
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
result = SCAN_FAIL;
@@ -1531,6 +1576,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
/* step 4: remove pte entries */
collapse_and_free_pmd(mm, vma, haddr, pmd);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+
maybe_install_pmd:
/* step 5: install pmd entry */
result = install_pmd
@@ -1544,6 +1591,7 @@ drop_hpage:
abort:
pte_unmap_unlock(start_pte, ptl);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
goto drop_hpage;
}
@@ -1600,7 +1648,8 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
* An alternative would be drop the check, but check that page
* table is clear before calling pmdp_collapse_flush() under
* ptl. It has higher chance to recover THP for the VMA, but
- * has higher cost too.
+ * has higher cost too. It would also probably require locking
+ * the anon_vma.
*/
if (vma->anon_vma) {
result = SCAN_PAGE_ANON;
@@ -2077,6 +2126,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
present = 0;
swap = 0;
memset(cc->node_load, 0, sizeof(cc->node_load));
+ nodes_clear(cc->alloc_nmask);
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
@@ -2157,8 +2207,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
}
- trace_mm_khugepaged_scan_file(mm, page, file->f_path.dentry->d_iname,
- present, swap, result);
+ trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
return result;
}
#else
@@ -2576,7 +2625,6 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
if (!cc)
return -ENOMEM;
cc->is_khugepaged = false;
- cc->last_target_node = NUMA_NO_NODE;
mmgrab(mm);
lru_add_drain_all();
@@ -2602,6 +2650,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
}
mmap_assert_locked(mm);
memset(cc->node_load, 0, sizeof(cc->node_load));
+ nodes_clear(cc->alloc_nmask);
if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma, addr);
diff --git a/mm/maccess.c b/mm/maccess.c
index 5f4d240f67ec..074f6b086671 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -97,7 +97,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
return src - unsafe_addr;
Efault:
pagefault_enable();
- dst[-1] = '\0';
+ dst[0] = '\0';
return -EFAULT;
}
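The fix matters when the very first source byte faults: dst has not advanced yet, so the old dst[-1] writes one byte before the caller's buffer, while dst[0] terminates the (empty) result in place. A user-space illustration with a simulated fault (not the kernel helper itself):

#include <stdio.h>

/* Copy up to count bytes; indexes >= fault_at stand in for a faulting
 * source page. On "fault", terminate at the current cursor position:
 * the old dst[i - 1] would write before the buffer when i == 0. */
static long copy_until_fault(char *dst, const char *src, long count, long fault_at)
{
	long i;

	if (count <= 0)
		return 0;
	for (i = 0; i < count; i++) {
		if (i >= fault_at) {
			dst[i] = '\0';
			return -1;
		}
		dst[i] = src[i];
		if (!src[i])
			return i + 1;
	}
	dst[count - 1] = '\0';
	return count;
}

int main(void)
{
	char buf[8];

	/* "fault" before the very first byte: result is an empty string and
	 * nothing outside buf[] is touched. */
	long ret = copy_until_fault(buf, "hello", sizeof(buf), 0);

	printf("ret=%ld result=\"%s\"\n", ret, buf);
	return 0;
}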
diff --git a/mm/madvise.c b/mm/madvise.c
index c7105ec6d08c..b913ba6efc10 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -772,8 +772,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
* Application no longer needs these pages. If the pages are dirty,
* it's OK to just throw them away. The app will be more careful about
* data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range call sets things up for shrink_active_list to actually free
- * these pages later if no one else has touched them in the meantime,
+ * zap_page_range_single call sets things up for shrink_active_list to actually
+ * free these pages later if no one else has touched them in the meantime,
* although we could add these pages to a global reuse list for
* shrink_active_list to pick up before reclaiming other pages.
*
@@ -790,7 +790,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- zap_page_range(vma, start, end - start);
+ zap_page_range_single(vma, start, end - start, NULL);
return 0;
}
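madvise_dontneed_single_vma() now goes through zap_page_range_single(), but the user-visible MADV_DONTNEED contract is unchanged: the range is dropped and anonymous pages come back zero-filled on the next touch. A small refresher on that contract using the standard API:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xaa, len);			/* dirty the range */
	if (madvise(p, len, MADV_DONTNEED))	/* this is the zap path above */
		return 1;
	printf("first byte after MADV_DONTNEED: %d\n", p[0]);	/* 0: zero-filled */
	munmap(p, len);
	return 0;
}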
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2d8549ae1b30..a1a35c12635e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3026,7 +3026,7 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
struct obj_cgroup *objcg;
- if (!memcg_kmem_enabled() || memcg_kmem_bypass())
+ if (!memcg_kmem_enabled())
return NULL;
if (PageMemcgKmem(page)) {
diff --git a/mm/memory.c b/mm/memory.c
index f88c351aecd4..8c8420934d60 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1341,15 +1341,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
return ret;
}
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
- struct folio *single_folio; /* Locked folio to be unmapped */
- bool even_cows; /* Zap COWed private pages too? */
- zap_flags_t zap_flags; /* Extra flags for zapping */
-};
-
/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
@@ -1720,7 +1711,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
{
struct mmu_notifier_range range;
struct zap_details details = {
- .zap_flags = ZAP_FLAG_DROP_MARKER,
+ .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
/* Careful - we need to zap private pages too! */
.even_cows = true,
};
@@ -1774,19 +1765,27 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
*
* The range must fit into one VMA.
*/
-static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
+ const unsigned long end = address + size;
struct mmu_notifier_range range;
struct mmu_gather tlb;
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
- address, address + size);
+ address, end);
+ if (is_vm_hugetlb_page(vma))
+ adjust_range_if_pmd_sharing_possible(vma, &range.start,
+ &range.end);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
- unmap_single_vma(&tlb, vma, address, range.end, details);
+ /*
+ * unmap 'address-end' not 'range.start-range.end' as range
+ * could have been expanded for hugetlb pmd sharing.
+ */
+ unmap_single_vma(&tlb, vma, address, end, details);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
}
@@ -3763,7 +3762,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
*/
get_page(vmf->page);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
put_page(vmf->page);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6fa682eef7a0..721b2365dbca 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -357,7 +357,8 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
}
/*
- * Unmaps pages for migration. Returns number of unmapped pages.
+ * Unmaps pages for migration. Returns number of source pfns marked as
+ * migrating.
*/
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
unsigned long npages,
@@ -373,8 +374,11 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct folio *folio;
- if (!page)
+ if (!page) {
+ if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
+ unmapped++;
continue;
+ }
/* ZONE_DEVICE pages are not on LRU */
if (!is_zone_device_page(page)) {
diff --git a/mm/mmap.c b/mm/mmap.c
index c3c5c1d6103d..a5eb2f175da0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -456,7 +456,7 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
* vma_mas_szero() - Set a given range to zero. Used when modifying a
* vm_area_struct start or end.
*
- * @mm: The struct_mm
+ * @mas: The maple tree ma_state
* @start: The start address to zero
* @end: The end address to zero.
*/
@@ -1779,9 +1779,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
- } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- /* Ensures that larger anonymous mappings are THP aligned. */
- get_area = thp_get_unmapped_area;
}
addr = get_area(file, addr, len, pgoff, flags);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index add4244e5790..3a2c3f8cad2f 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -153,7 +153,7 @@ static void tlb_remove_table_smp_sync(void *arg)
/* Simply deliver the interrupt */
}
-static void tlb_remove_table_sync_one(void)
+void tlb_remove_table_sync_one(void)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
@@ -177,8 +177,6 @@ static void tlb_remove_table_free(struct mmu_table_batch *batch)
#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
-static void tlb_remove_table_sync_one(void) { }
-
static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
__tlb_remove_table_free(batch);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 218b28ee49ed..6e60657875d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3887,6 +3887,8 @@ __setup("fail_page_alloc=", setup_fail_page_alloc);
static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
+ int flags = 0;
+
if (order < fail_page_alloc.min_order)
return false;
if (gfp_mask & __GFP_NOFAIL)
@@ -3897,10 +3899,11 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
(gfp_mask & __GFP_DIRECT_RECLAIM))
return false;
+ /* See comment in __should_failslab() */
if (gfp_mask & __GFP_NOWARN)
- fail_page_alloc.attr.no_warn = true;
+ flags |= FAULT_NOWARN;
- return should_fail(&fail_page_alloc.attr, 1 << order);
+ return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
diff --git a/mm/page_ext.c b/mm/page_ext.c
index affe80243b6d..ddf1968560f0 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -166,7 +166,7 @@ struct page_ext *page_ext_get(struct page *page)
/**
* page_ext_put() - Working with page extended information is done.
- * @page_ext - Page extended information received from page_ext_get().
+ * @page_ext: Page extended information received from page_ext_get().
*
* The page extended information of the page may not be valid after this
* function is called.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5fc1237a9f21..72e481aacd5d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -973,23 +973,23 @@ done:
scan:
spin_unlock(&si->lock);
while (++offset <= READ_ONCE(si->highest_bit)) {
- if (swap_offset_available_and_locked(si, offset))
- goto checks;
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
scanned_many = true;
}
+ if (swap_offset_available_and_locked(si, offset))
+ goto checks;
}
offset = si->lowest_bit;
while (offset < scan_base) {
- if (swap_offset_available_and_locked(si, offset))
- goto checks;
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
scanned_many = true;
}
+ if (swap_offset_available_and_locked(si, offset))
+ goto checks;
offset++;
}
spin_lock(&si->lock);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 04d8b88e5216..8fcc5fa768c0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2514,8 +2514,20 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
* the flushers simply cannot keep up with the allocation
* rate. Nudge the flusher threads in case they are asleep.
*/
- if (stat.nr_unqueued_dirty == nr_taken)
+ if (stat.nr_unqueued_dirty == nr_taken) {
wakeup_flusher_threads(WB_REASON_VMSCAN);
+ /*
+ * For cgroupv1, dirty throttling is achieved by waking up
+ * the kernel flusher here and later waiting on folios
+ * which are in writeback to finish (see shrink_folio_list()).
+ *
+ * The flusher may not be able to issue writeback quickly
+ * enough for cgroupv1 writeback throttling to work
+ * on a large system.
+ */
+ if (!writeback_throttling_sane(sc))
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ }
sc->nr.dirty += stat.nr_dirty;
sc->nr.congested += stat.nr_congested;
@@ -3975,7 +3987,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area
goto next;
if (!pmd_trans_huge(pmd[i])) {
- if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
+ if (arch_has_hw_nonleaf_pmd_young() &&
get_cap(LRU_GEN_NONLEAF_YOUNG))
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
@@ -4073,14 +4085,14 @@ restart:
#endif
walk->mm_stats[MM_NONLEAF_TOTAL]++;
-#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
- if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
+ if (arch_has_hw_nonleaf_pmd_young() &&
+ get_cap(LRU_GEN_NONLEAF_YOUNG)) {
if (!pmd_young(val))
continue;
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
}
-#endif
+
if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
continue;
@@ -4971,10 +4983,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
int scanned;
int reclaimed;
LIST_HEAD(list);
+ LIST_HEAD(clean);
struct folio *folio;
+ struct folio *next;
enum vm_event_item item;
struct reclaim_stat stat;
struct lru_gen_mm_walk *walk;
+ bool skip_retry = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -4991,20 +5006,37 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
if (list_empty(&list))
return scanned;
-
+retry:
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
+ sc->nr_reclaimed += reclaimed;
- list_for_each_entry(folio, &list, lru) {
- /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
- if (folio_test_workingset(folio))
- folio_set_referenced(folio);
+ list_for_each_entry_safe_reverse(folio, next, &list, lru) {
+ if (!folio_evictable(folio)) {
+ list_del(&folio->lru);
+ folio_putback_lru(folio);
+ continue;
+ }
- /* don't add rejected pages to the oldest generation */
if (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio)))
- folio_clear_active(folio);
- else
- folio_set_active(folio);
+ (folio_test_dirty(folio) || folio_test_writeback(folio))) {
+ /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
+ if (folio_test_workingset(folio))
+ folio_set_referenced(folio);
+ continue;
+ }
+
+ if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) ||
+ folio_mapped(folio) || folio_test_locked(folio) ||
+ folio_test_dirty(folio) || folio_test_writeback(folio)) {
+ /* don't add rejected folios to the oldest generation */
+ set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
+ BIT(PG_active));
+ continue;
+ }
+
+ /* retry folios that may have missed folio_rotate_reclaimable() */
+ list_move(&folio->lru, &clean);
+ sc->nr_scanned -= folio_nr_pages(folio);
}
spin_lock_irq(&lruvec->lru_lock);
@@ -5026,7 +5058,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
mem_cgroup_uncharge_list(&list);
free_unref_page_list(&list);
- sc->nr_reclaimed += reclaimed;
+ INIT_LIST_HEAD(&list);
+ list_splice_init(&clean, &list);
+
+ if (!list_empty(&list)) {
+ skip_retry = true;
+ goto retry;
+ }
if (need_swapping && type == LRU_GEN_ANON)
*need_swapping = true;
@@ -5354,7 +5392,7 @@ static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, c
if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
caps |= BIT(LRU_GEN_MM_WALK);
- if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
+ if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
@@ -5844,8 +5882,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
enum lru_list lru;
unsigned long nr_reclaimed = 0;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+ bool proportional_reclaim;
struct blk_plug plug;
- bool scan_adjusted;
if (lru_gen_enabled()) {
lru_gen_shrink_lruvec(lruvec, sc);
@@ -5868,8 +5906,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
* abort proportional reclaim if either the file or anon lru has already
* dropped to zero at the first pass.
*/
- scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
- sc->priority == DEF_PRIORITY);
+ proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
+ sc->priority == DEF_PRIORITY);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -5889,7 +5927,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
cond_resched();
- if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
+ if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
continue;
/*
@@ -5940,8 +5978,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
nr_scanned = targets[lru] - nr[lru];
nr[lru] = targets[lru] * (100 - percentage) / 100;
nr[lru] -= min(nr[lru], nr_scanned);
-
- scan_adjusted = true;
}
blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;
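The evict_folios() change above collects folios that may have missed folio_rotate_reclaimable() onto a separate list and feeds them back for exactly one more pass, with skip_retry preventing a third. A self-contained sketch of that retry-once control flow; the list type and the pass() callback are illustrative, not the kernel API:

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };
struct list { struct node *head; };

static bool list_is_empty(const struct list *l) { return l->head == NULL; }

static void list_splice(struct list *from, struct list *to)
{
        struct node *n = from->head;

        if (!n)
                return;
        while (n->next)
                n = n->next;
        n->next = to->head;
        to->head = from->head;
        from->head = NULL;
}

/* pass() handles what it can and moves possibly-transient failures to *deferred. */
static unsigned long reclaim_with_one_retry(struct list *work, struct list *deferred,
                unsigned long (*pass)(struct list *work, struct list *deferred, bool skip_retry))
{
        unsigned long done = 0;
        bool skip_retry = false;

retry:
        done += pass(work, deferred, skip_retry);
        if (!skip_retry && !list_is_empty(deferred)) {
                list_splice(deferred, work);    /* feed the deferred entries back in */
                skip_retry = true;              /* ...but never make a third pass */
                goto retry;
        }
        return done;
}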
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 56a186768750..07db2f436d44 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -120,7 +120,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
- char tmp_buf[7];
+ char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@@ -202,9 +202,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
+ req->status = REQ_STATUS_ERROR;
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
+ req->status = REQ_STATUS_ERROR;
}
spin_unlock(&m->req_lock);
@@ -291,7 +293,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
- m->rc.capacity = 7; /* start by reading header */
+ m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
@@ -314,7 +316,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
- m->rc.size = 7;
+ m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
@@ -322,14 +324,6 @@ static void p9_read_work(struct work_struct *work)
goto error;
}
- if (m->rc.size >= m->client->msize) {
- p9_debug(P9_DEBUG_ERROR,
- "requested packet size too big: %d\n",
- m->rc.size);
- err = -EIO;
- goto error;
- }
-
p9_debug(P9_DEBUG_TRANS,
"mux %p pkt: size: %d bytes tag: %d\n",
m, m->rc.size, m->rc.tag);
@@ -342,6 +336,14 @@ static void p9_read_work(struct work_struct *work)
goto error;
}
+ if (m->rc.size > m->rreq->rc.capacity) {
+ p9_debug(P9_DEBUG_ERROR,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ m->rc.size, m->rc.tag, m->rreq->rc.capacity);
+ err = -EIO;
+ goto error;
+ }
+
if (!m->rreq->rc.sdata) {
p9_debug(P9_DEBUG_ERROR,
"No recv fcall for tag %d (req %p), disconnecting!\n",
@@ -860,8 +862,10 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
struct file *file;
p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ sock_release(csocket);
return -ENOMEM;
+ }
csocket->sk->sk_allocation = GFP_NOIO;
file = sock_alloc_file(csocket, 0, NULL);
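The read path above replaces the magic header length 7 with P9_HDRSZ and moves the size check so the advertised reply size is validated against the capacity of the matched request's receive buffer rather than the negotiated msize. A minimal sketch of that bound check against a little-endian 9p size field (helper names are illustrative):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define HDRSZ   7       /* size[4] + type[1] + tag[2], which is what P9_HDRSZ encodes */

static int check_reply_size(const uint8_t hdr[HDRSZ], size_t capacity)
{
        uint32_t size = (uint32_t)hdr[0] | ((uint32_t)hdr[1] << 8) |
                        ((uint32_t)hdr[2] << 16) | ((uint32_t)hdr[3] << 24);

        if (size > capacity)
                return -EIO;    /* would overrun the preallocated receive buffer */
        return 0;
}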
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index b15c64128c3e..aaa5fd364691 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -208,6 +208,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}
+ if (h.size > req->rc.capacity) {
+ dev_warn(&priv->dev->dev,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ h.size, h.tag, req->rc.capacity);
+ req->status = REQ_STATUS_ERROR;
+ goto recv_error;
+ }
+
memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;
@@ -217,6 +225,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE(ring));
+recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 13d578ce2a09..fcb3e6c5e03c 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -774,6 +774,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
if (user_size > size)
return ERR_PTR(-EMSGSIZE);
+ size = SKB_DATA_ALIGN(size);
data = kzalloc(size + headroom + tailroom, GFP_USER);
if (!data)
return ERR_PTR(-ENOMEM);
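bpf_test_init() now rounds the requested data size with SKB_DATA_ALIGN() before allocating, keeping what follows the packet data cache-line aligned. A generic round-up helper with an assumed 64-byte line (SMP_CACHE_BYTES is per-arch, so this is only illustrative):

#include <stddef.h>

#define CACHE_LINE      ((size_t)64)
#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

static size_t test_buf_size(size_t user_size, size_t headroom, size_t tailroom)
{
        /* round the data area up to a whole cache line, then add head/tail room */
        return ALIGN_UP(user_size, CACHE_LINE) + headroom + tailroom;
}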
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 6e53dc991409..9ffd40b8270c 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -959,6 +959,8 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+ if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ continue;
err = vlan_vid_add(p->dev, proto, vlan->vid);
if (err)
goto err_filt;
@@ -973,8 +975,11 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
/* Delete VLANs for the old proto from the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
- list_for_each_entry(vlan, &vg->vlan_list, vlist)
+ list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+ if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ continue;
vlan_vid_del(p->dev, oldproto, vlan->vid);
+ }
}
return 0;
@@ -983,13 +988,19 @@ err_filt:
attr.u.vlan_protocol = ntohs(oldproto);
switchdev_port_attr_set(br->dev, &attr, NULL);
- list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
+ list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
+ if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ continue;
vlan_vid_del(p->dev, proto, vlan->vid);
+ }
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
- list_for_each_entry(vlan, &vg->vlan_list, vlist)
+ list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+ if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ continue;
vlan_vid_del(p->dev, proto, vlan->vid);
+ }
}
return err;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4d63ef13a1fd..f35fc87c453a 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -310,9 +310,6 @@ static int chnl_net_open(struct net_device *dev)
if (result == 0) {
pr_debug("connect timeout\n");
- caif_disconnect_client(dev_net(dev), &priv->chnl);
- priv->state = CAIF_DISCONNECTED;
- pr_debug("state disconnected\n");
result = -ETIMEDOUT;
goto error;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 25cd35f5922e..007730412947 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -296,7 +296,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
- key->ct_mark = ct->mark;
+ key->ct_mark = READ_ONCE(ct->mark);
#endif
cl = nf_ct_labels_find(ct);
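This hunk, and many later ones in this series (ipt_CLUSTERIP, nf_conntrack_netlink, nft_ct, xt_connmark, openvswitch, act_ct, act_connmark), convert plain accesses of ct->mark to READ_ONCE()/WRITE_ONCE(), since the mark may be rewritten while another context reads it without holding the conntrack lock. A rough userspace analogue of that access discipline using C11 relaxed atomics; the struct and helpers are illustrative only:

#include <stdatomic.h>
#include <stdint.h>

struct conn { _Atomic uint32_t mark; };

static uint32_t conn_get_mark(const struct conn *ct)
{
        /* single, non-torn load; no ordering beyond the value itself is needed */
        return atomic_load_explicit(&ct->mark, memory_order_relaxed);
}

static void conn_set_mark(struct conn *ct, uint32_t mask, uint32_t value)
{
        uint32_t oldmark = atomic_load_explicit(&ct->mark, memory_order_relaxed);
        uint32_t newmark = (oldmark & ~mask) | (value & mask);

        if (newmark != oldmark)         /* avoid dirtying the cache line needlessly */
                atomic_store_explicit(&ct->mark, newmark, memory_order_relaxed);
}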
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 6fac2f0ef074..711cd3b4347a 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -48,9 +48,11 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
return "RPL";
case LWTUNNEL_ENCAP_IOAM6:
return "IOAM6";
+ case LWTUNNEL_ENCAP_XFRM:
+ /* module autoload not supported for encap type */
+ return NULL;
case LWTUNNEL_ENCAP_IP6:
case LWTUNNEL_ENCAP_IP:
- case LWTUNNEL_ENCAP_XFRM:
case LWTUNNEL_ENCAP_NONE:
case __LWTUNNEL_ENCAP_MAX:
/* should not have got here */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a77a85e357e0..952a54763358 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -307,7 +307,31 @@ static int neigh_del_timer(struct neighbour *n)
return 0;
}
-static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
+static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+ int family)
+{
+ switch (family) {
+ case AF_INET:
+ return __in_dev_arp_parms_get_rcu(dev);
+ case AF_INET6:
+ return __in6_dev_nd_parms_get_rcu(dev);
+ }
+ return NULL;
+}
+
+static void neigh_parms_qlen_dec(struct net_device *dev, int family)
+{
+ struct neigh_parms *p;
+
+ rcu_read_lock();
+ p = neigh_get_dev_parms_rcu(dev, family);
+ if (p)
+ p->qlen--;
+ rcu_read_unlock();
+}
+
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
+ int family)
{
struct sk_buff_head tmp;
unsigned long flags;
@@ -321,13 +345,7 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
struct net_device *dev = skb->dev;
if (net == NULL || net_eq(dev_net(dev), net)) {
- struct in_device *in_dev;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev)
- in_dev->arp_parms->qlen--;
- rcu_read_unlock();
+ neigh_parms_qlen_dec(dev, family);
__skb_unlink(skb, list);
__skb_queue_tail(&tmp, skb);
}
@@ -409,7 +427,8 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
- pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+ pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
+ tbl->family);
if (skb_queue_empty_lockless(&tbl->proxy_queue))
del_timer_sync(&tbl->proxy_timer);
return 0;
@@ -1621,13 +1640,8 @@ static void neigh_proxy_process(struct timer_list *t)
if (tdif <= 0) {
struct net_device *dev = skb->dev;
- struct in_device *in_dev;
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev)
- in_dev->arp_parms->qlen--;
- rcu_read_unlock();
+ neigh_parms_qlen_dec(dev, tbl->family);
__skb_unlink(skb, &tbl->proxy_queue);
if (tbl->proxy_redo && netif_running(dev)) {
@@ -1821,7 +1835,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
- pneigh_queue_purge(&tbl->proxy_queue, NULL);
+ pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
@@ -3539,18 +3553,6 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
-static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
- int family)
-{
- switch (family) {
- case AF_INET:
- return __in_dev_arp_parms_get_rcu(dev);
- case AF_INET6:
- return __in6_dev_nd_parms_get_rcu(dev);
- }
- return NULL;
-}
-
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
int index)
{
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 713b7b8dad7e..b780827f5e0a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -45,11 +45,10 @@ static unsigned int dccp_v4_pernet_id __read_mostly;
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
- struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
- __be32 daddr, nexthop, prev_sk_rcv_saddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
+ __be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
@@ -91,26 +90,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
daddr = fl4->daddr;
if (inet->inet_saddr == 0) {
- if (inet_csk(sk)->icsk_bind2_hash) {
- prev_addr_hashbucket =
- inet_bhashfn_portaddr(&dccp_hashinfo, sk,
- sock_net(sk),
- inet->inet_num);
- prev_sk_rcv_saddr = sk->sk_rcv_saddr;
- }
- inet->inet_saddr = fl4->saddr;
- }
-
- sk_rcv_saddr_set(sk, inet->inet_saddr);
-
- if (prev_addr_hashbucket) {
- err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+ err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
if (err) {
- inet->inet_saddr = 0;
- sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
ip_rt_put(rt);
return err;
}
+ } else {
+ sk_rcv_saddr_set(sk, inet->inet_saddr);
}
inet->inet_dport = usin->sin_port;
@@ -157,6 +143,7 @@ failure:
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
+ inet_bhash2_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index e57b43006074..602f3432d80b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -934,26 +934,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
}
if (saddr == NULL) {
- struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
- struct in6_addr prev_v6_rcv_saddr;
-
- if (icsk->icsk_bind2_hash) {
- prev_addr_hashbucket = inet_bhashfn_portaddr(&dccp_hashinfo,
- sk, sock_net(sk),
- inet->inet_num);
- prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
- }
-
saddr = &fl6.saddr;
- sk->sk_v6_rcv_saddr = *saddr;
-
- if (prev_addr_hashbucket) {
- err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
- if (err) {
- sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
- goto failure;
- }
- }
+
+ err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
+ if (err)
+ goto failure;
}
/* set the source address */
@@ -985,6 +970,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
dccp_set_state(sk, DCCP_CLOSED);
+ inet_bhash2_reset_saddr(sk);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index c548ca3e9b0e..85e35c5e8890 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -279,8 +279,7 @@ int dccp_disconnect(struct sock *sk, int flags)
inet->inet_dport = 0;
- if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
- inet_reset_saddr(sk);
+ inet_bhash2_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index e504a18fc125..5417f7b1187c 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -864,6 +864,14 @@ disconnect:
return err;
}
+static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
+{
+ const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
+
+ if (tag_ops->disconnect)
+ tag_ops->disconnect(ds);
+}
+
static int dsa_switch_setup(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
@@ -953,6 +961,8 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->slave_mii_bus = NULL;
}
+ dsa_switch_teardown_tag_protocol(ds);
+
if (ds->ops->teardown)
ds->ops->teardown(ds);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 6e65c7ffd6f3..71e9707d11d4 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -210,6 +210,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
extern struct rtnl_link_ops dsa_link_ops __read_mostly;
/* port.c */
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr);
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 40367ab41cf8..421de166515f 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -204,8 +204,7 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
* switch in the tree that is PTP capable.
*/
list_for_each_entry(dp, &dst->ports, list)
- if (dp->ds->ops->port_hwtstamp_get ||
- dp->ds->ops->port_hwtstamp_set)
+ if (dsa_port_supports_hwtstamp(dp, ifr))
return -EBUSY;
break;
}
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 208168276995..750fe68d9b2a 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -110,6 +110,22 @@ static bool dsa_port_can_configure_learning(struct dsa_port *dp)
return !err;
}
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
+ return false;
+
+ /* "See through" shim implementations of the "get" method.
+ * This will clobber the ifreq structure, but we will either return an
+ * error, or the master will overwrite it with proper values.
+ */
+ err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
+ return err != -EOPNOTSUPP;
+}
+
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
struct dsa_switch *ds = dp->ds;
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index a50429a62f74..56bb27d67a2e 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -351,17 +351,18 @@ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
struct hsr_node *node_src)
{
bool was_multicast_frame;
- int res;
+ int res, recv_len;
was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
hsr_addr_subst_source(node_src, skb);
skb_pull(skb, ETH_HLEN);
+ recv_len = skb->len;
res = netif_rx(skb);
if (res == NET_RX_DROP) {
dev->stats.rx_dropped++;
} else {
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += recv_len;
if (was_multicast_frame)
dev->stats.multicast++;
}
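hsr_deliver_master() now samples skb->len before netif_rx(), because a successful netif_rx() may have freed the skb by the time the byte counter is updated. The same ownership rule in a generic form (the types and the deliver callback are hypothetical):

#include <stddef.h>

struct pkt   { size_t len; };
struct stats { unsigned long rx_packets, rx_bytes, rx_dropped; };

static void deliver_and_count(struct pkt *p, struct stats *st,
                              int (*deliver)(struct pkt *))
{
        size_t len = p->len;            /* read before ownership is passed on */

        if (deliver(p) != 0) {
                st->rx_dropped++;
        } else {
                st->rx_packets++;
                st->rx_bytes += len;    /* not p->len: p may already be gone */
        }
}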
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e983bb0c5012..2dfb12230f08 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -402,6 +402,16 @@ config INET_IPCOMP
If unsure, say Y.
+config INET_TABLE_PERTURB_ORDER
+ int "INET: Source port perturbation table size (as power of 2)" if EXPERT
+ default 16
+ help
+ Source port perturbation table size (as power of 2) for
+ RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm.
+
+ The default is almost always what you want.
+ Only change this if you know what you are doing.
+
config INET_XFRM_TUNNEL
tristate
select INET_TUNNEL
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 4728087c42a5..0da679411330 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1230,7 +1230,6 @@ EXPORT_SYMBOL(inet_unregister_protosw);
static int inet_sk_reselect_saddr(struct sock *sk)
{
- struct inet_bind_hashbucket *prev_addr_hashbucket;
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
@@ -1260,16 +1259,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
return 0;
}
- prev_addr_hashbucket =
- inet_bhashfn_portaddr(tcp_or_dccp_get_hashinfo(sk), sk,
- sock_net(sk), inet->inet_num);
-
- inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
-
- err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+ err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
if (err) {
- inet->inet_saddr = old_saddr;
- inet->inet_rcv_saddr = old_saddr;
ip_rt_put(rt);
return err;
}
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 170152772d33..3969fa805679 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -314,6 +314,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ if (xo->seq.low < seq)
+ xo->seq.hi++;
+
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
ip_hdr(skb)->tot_len = htons(skb->len);
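Both ESP offload hunks (this one and the esp6 counterpart later) carry a 64-bit extended sequence number in two 32-bit halves; after the low half is advanced by the number of GSO segments, a wrap-around has to propagate into the high half. Minimal sketch of that carry, with a hypothetical two-word counter:

#include <stdint.h>

struct esn { uint32_t lo, hi; };

static void esn_advance(struct esn *seq, uint32_t segs)
{
        uint32_t old = seq->lo;

        seq->lo += segs;
        if (seq->lo < old)              /* 32-bit wrap: carry into the high half */
                seq->hi++;
}

static uint64_t esn_value(const struct esn *seq)
{
        return ((uint64_t)seq->hi << 32) | seq->lo;
}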
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f721c308248b..19a662003eef 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -888,9 +888,11 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
return 1;
}
- /* cannot match on nexthop object attributes */
- if (fi->nh)
- return 1;
+ if (fi->nh) {
+ if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp)
+ return 1;
+ return 0;
+ }
if (cfg->fc_oif || cfg->fc_gw_family) {
struct fib_nh *nh;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 452ff177e4da..74d403dbd2b4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -126,7 +126,7 @@ struct key_vector {
/* This list pointer if valid if (pos | bits) == 0 (LEAF) */
struct hlist_head leaf;
/* This array is valid if (pos | bits) > 0 (TNODE) */
- struct key_vector __rcu *tnode[0];
+ DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
};
};
@@ -1381,8 +1381,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
/* The alias was already inserted, so the node must exist. */
l = l ? l : fib_find_node(t, &tp, key);
- if (WARN_ON_ONCE(!l))
+ if (WARN_ON_ONCE(!l)) {
+ err = -ENOENT;
goto out_free_new_fa;
+ }
if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
new_fa) {
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index d3dc28156622..3cec471a2cd2 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -858,34 +858,80 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk)
+static void inet_update_saddr(struct sock *sk, void *saddr, int family)
+{
+ if (family == AF_INET) {
+ inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
+ sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else {
+ sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
+ }
+#endif
+}
+
+static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
+ struct inet_bind_hashbucket *head, *head2;
struct inet_bind2_bucket *tb2, *new_tb2;
int l3mdev = inet_sk_bound_l3mdev(sk);
- struct inet_bind_hashbucket *head2;
int port = inet_sk(sk)->inet_num;
struct net *net = sock_net(sk);
+ int bhash;
+
+ if (!inet_csk(sk)->icsk_bind2_hash) {
+ /* Not bind()ed before. */
+ if (reset)
+ inet_reset_saddr(sk);
+ else
+ inet_update_saddr(sk, saddr, family);
+
+ return 0;
+ }
/* Allocate a bind2 bucket ahead of time to avoid permanently putting
* the bhash2 table in an inconsistent state if a new tb2 bucket
* allocation fails.
*/
new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
- if (!new_tb2)
+ if (!new_tb2) {
+ if (reset) {
+ /* The (INADDR_ANY, port) bucket might have already
+ * been freed, then we cannot fixup icsk_bind2_hash,
+ * so we give up and unlink sk from bhash/bhash2 not
+ * to leave inconsistency in bhash2.
+ */
+ inet_put_port(sk);
+ inet_reset_saddr(sk);
+ }
+
return -ENOMEM;
+ }
+ bhash = inet_bhashfn(net, port, hinfo->bhash_size);
+ head = &hinfo->bhash[bhash];
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
- if (prev_saddr) {
- spin_lock_bh(&prev_saddr->lock);
- __sk_del_bind2_node(sk);
- inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
- inet_csk(sk)->icsk_bind2_hash);
- spin_unlock_bh(&prev_saddr->lock);
- }
+ /* If we change saddr locklessly, another thread
+ * iterating over bhash might see corrupted address.
+ */
+ spin_lock_bh(&head->lock);
- spin_lock_bh(&head2->lock);
+ spin_lock(&head2->lock);
+ __sk_del_bind2_node(sk);
+ inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
+ spin_unlock(&head2->lock);
+
+ if (reset)
+ inet_reset_saddr(sk);
+ else
+ inet_update_saddr(sk, saddr, family);
+
+ head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
+
+ spin_lock(&head2->lock);
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
if (!tb2) {
tb2 = new_tb2;
@@ -893,26 +939,40 @@ int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct soc
}
sk_add_bind2_node(sk, &tb2->owners);
inet_csk(sk)->icsk_bind2_hash = tb2;
- spin_unlock_bh(&head2->lock);
+ spin_unlock(&head2->lock);
+
+ spin_unlock_bh(&head->lock);
if (tb2 != new_tb2)
kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);
return 0;
}
+
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
+{
+ return __inet_bhash2_update_saddr(sk, saddr, family, false);
+}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);
+void inet_bhash2_reset_saddr(struct sock *sk)
+{
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ __inet_bhash2_update_saddr(sk, NULL, 0, true);
+}
+EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);
+
/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
* property might be used by clever attacker.
+ *
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
- * attacks were since demonstrated, thus we use 65536 instead to really
- * give more isolation and privacy, at the expense of 256kB of kernel
- * memory.
+ * attacks were since demonstrated, thus we use 65536 by default instead
+ * to really give more isolation and privacy, at the expense of 256kB
+ * of kernel memory.
*/
-#define INET_TABLE_PERTURB_SHIFT 16
-#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
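With INET_TABLE_PERTURB_SHIFT gone, the perturbation table size is derived from CONFIG_INET_TABLE_PERTURB_ORDER (default 16, i.e. the previous 65536-entry, 256 kB table). Sketch of how a power-of-two order turns into a size and an index mask, assuming the default order:

#include <stdint.h>

#define TABLE_PERTURB_ORDER     16
#define TABLE_PERTURB_SIZE      (1u << TABLE_PERTURB_ORDER)

static uint32_t table_perturb[TABLE_PERTURB_SIZE];      /* 64K entries * 4 bytes = 256 kB */

static uint32_t perturb_slot(uint32_t hash)
{
        /* power-of-two size, so masking replaces a modulo */
        return table_perturb[hash & (TABLE_PERTURB_SIZE - 1)];
}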
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 1b512390b3cf..e880ce77322a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -366,6 +366,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
iph->tos, dev);
if (unlikely(err))
goto drop_error;
+ } else {
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+ if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
+ IPCB(skb)->flags |= IPSKB_NOPOLICY;
}
#ifdef CONFIG_IP_ROUTE_CLASSID
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f8e176c77d1c..b3cc416ed292 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -435,7 +435,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
switch (ctinfo) {
case IP_CT_NEW:
- ct->mark = hash;
+ WRITE_ONCE(ct->mark, hash);
break;
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
@@ -452,7 +452,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
#ifdef DEBUG
nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
- pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
+ pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark));
if (!clusterip_responsible(cipinfo->config, hash)) {
pr_debug("not responsible\n");
return NF_DROP;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 54836a6b81d6..4f2205756cfe 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3114,8 +3114,7 @@ int tcp_disconnect(struct sock *sk, int flags)
inet->inet_dport = 0;
- if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
- inet_reset_saddr(sk);
+ inet_bhash2_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 87d440f47a70..da46357f501b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -199,15 +199,14 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
- struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_timewait_death_row *tcp_death_row;
- __be32 daddr, nexthop, prev_sk_rcv_saddr;
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct ip_options_rcu *inet_opt;
struct net *net = sock_net(sk);
__be16 orig_sport, orig_dport;
+ __be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
@@ -251,24 +250,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (!inet->inet_saddr) {
- if (inet_csk(sk)->icsk_bind2_hash) {
- prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
- sk, net, inet->inet_num);
- prev_sk_rcv_saddr = sk->sk_rcv_saddr;
- }
- inet->inet_saddr = fl4->saddr;
- }
-
- sk_rcv_saddr_set(sk, inet->inet_saddr);
-
- if (prev_addr_hashbucket) {
- err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+ err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
if (err) {
- inet->inet_saddr = 0;
- sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
ip_rt_put(rt);
return err;
}
+ } else {
+ sk_rcv_saddr_set(sk, inet->inet_saddr);
}
if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -343,6 +331,7 @@ failure:
* if necessary.
*/
tcp_set_state(sk, TCP_CLOSE);
+ inet_bhash2_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 79d43548279c..242f4295940e 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -346,6 +346,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ if (xo->seq.low < seq)
+ xo->seq.hi++;
+
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
len = skb->len - sizeof(struct ipv6hdr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2a3f9296df1e..f0548dbcabd2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -292,24 +292,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (!saddr) {
- struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
- struct in6_addr prev_v6_rcv_saddr;
-
- if (icsk->icsk_bind2_hash) {
- prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
- sk, net, inet->inet_num);
- prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
- }
saddr = &fl6.saddr;
- sk->sk_v6_rcv_saddr = *saddr;
- if (prev_addr_hashbucket) {
- err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
- if (err) {
- sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
- goto failure;
- }
- }
+ err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
+ if (err)
+ goto failure;
}
/* set the source address */
@@ -359,6 +346,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
tcp_set_state(sk, TCP_CLOSE);
+ inet_bhash2_reset_saddr(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a4b0e49ec92..ea435eba3053 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -287,9 +287,13 @@ int __init xfrm6_init(void)
if (ret)
goto out_state;
- register_pernet_subsys(&xfrm6_net_ops);
+ ret = register_pernet_subsys(&xfrm6_net_ops);
+ if (ret)
+ goto out_protocol;
out:
return ret;
+out_protocol:
+ xfrm6_protocol_fini();
out_state:
xfrm6_state_fini();
out_policy:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a5004228111d..890a2423f559 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -222,7 +222,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
struct sk_buff *skb;
struct kcm_sock *kcm;
- while ((skb = __skb_dequeue(head))) {
+ while ((skb = skb_dequeue(head))) {
/* Reset destructor to avoid calling kcm_rcv_ready */
skb->destructor = sock_rfree;
skb_orphan(skb);
@@ -1085,53 +1085,17 @@ out_error:
return err;
}
-static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
- long timeo, int *err)
-{
- struct sk_buff *skb;
-
- while (!(skb = skb_peek(&sk->sk_receive_queue))) {
- if (sk->sk_err) {
- *err = sock_error(sk);
- return NULL;
- }
-
- if (sock_flag(sk, SOCK_DONE))
- return NULL;
-
- if ((flags & MSG_DONTWAIT) || !timeo) {
- *err = -EAGAIN;
- return NULL;
- }
-
- sk_wait_data(sk, &timeo, NULL);
-
- /* Handle signals */
- if (signal_pending(current)) {
- *err = sock_intr_errno(timeo);
- return NULL;
- }
- }
-
- return skb;
-}
-
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
int err = 0;
- long timeo;
struct strp_msg *stm;
int copied = 0;
struct sk_buff *skb;
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
- lock_sock(sk);
-
- skb = kcm_wait_data(sk, flags, timeo, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -1162,14 +1126,11 @@ msg_finished:
/* Finished with message */
msg->msg_flags |= MSG_EOR;
KCM_STATS_INCR(kcm->stats.rx_msgs);
- skb_unlink(skb, &sk->sk_receive_queue);
- kfree_skb(skb);
}
}
out:
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return copied ? : err;
}
@@ -1179,7 +1140,6 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
{
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
- long timeo;
struct strp_msg *stm;
int err = 0;
ssize_t copied;
@@ -1187,11 +1147,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
/* Only support splice for SOCKSEQPACKET */
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
- lock_sock(sk);
-
- skb = kcm_wait_data(sk, flags, timeo, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto err_out;
@@ -1219,13 +1175,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
* finish reading the message.
*/
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return copied;
err_out:
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return err;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c85df5b958d2..95edcbedf6ef 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2905,7 +2905,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
@@ -2923,7 +2923,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
@@ -2934,16 +2934,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
return sz + sizeof(struct sadb_prop);
}
-static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
+ int sz = 0;
int i;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -2971,13 +2972,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
+ sz += sizeof(*c);
}
}
+
+ return sz + sizeof(*p);
}
-static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
+ int sz = 0;
int i, k;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -3019,8 +3024,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
+ sz += sizeof(*c);
}
}
+
+ return sz + sizeof(*p);
}
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
@@ -3150,6 +3158,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int ctx_size = 0;
+ int alg_size = 0;
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
@@ -3161,16 +3170,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
sizeof(struct sadb_x_policy);
if (x->id.proto == IPPROTO_AH)
- size += count_ah_combs(t);
+ alg_size = count_ah_combs(t);
else if (x->id.proto == IPPROTO_ESP)
- size += count_esp_combs(t);
+ alg_size = count_esp_combs(t);
if ((xfrm_ctx = x->security)) {
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
}
- skb = alloc_skb(size + 16, GFP_ATOMIC);
+ skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
@@ -3224,10 +3233,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
+ alg_size = 0;
if (x->id.proto == IPPROTO_AH)
- dump_ah_combs(skb, t);
+ alg_size = dump_ah_combs(skb, t);
else if (x->id.proto == IPPROTO_ESP)
- dump_esp_combs(skb, t);
+ alg_size = dump_esp_combs(skb, t);
+
+ hdr->sadb_msg_len += alg_size / 8;
/* security context */
if (xfrm_ctx) {
@@ -3382,7 +3394,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
- hdr->sadb_msg_seq = x->km.seq = get_acqseq();
+ hdr->sadb_msg_seq = x->km.seq;
hdr->sadb_msg_pid = 0;
/* SA */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 7499c51b1850..9a1415fe3fa7 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1150,8 +1150,10 @@ static void l2tp_tunnel_destruct(struct sock *sk)
}
/* Remove hooks into tunnel socket */
+ write_lock_bh(&sk->sk_callback_lock);
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
/* Call the original destructor */
if (sk->sk_destruct)
@@ -1469,16 +1471,19 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
sock = sockfd_lookup(tunnel->fd, &ret);
if (!sock)
goto err;
-
- ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
- if (ret < 0)
- goto err_sock;
}
+ sk = sock->sk;
+ write_lock_bh(&sk->sk_callback_lock);
+ ret = l2tp_validate_socket(sk, net, tunnel->encap);
+ if (ret < 0)
+ goto err_inval_sock;
+ rcu_assign_sk_user_data(sk, tunnel);
+ write_unlock_bh(&sk->sk_callback_lock);
+
tunnel->l2tp_net = net;
pn = l2tp_pernet(net);
- sk = sock->sk;
sock_hold(sk);
tunnel->sock = sk;
@@ -1503,8 +1508,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
};
setup_udp_tunnel_sock(net, sock, &udp_cfg);
- } else {
- sk->sk_user_data = tunnel;
}
tunnel->old_sk_destruct = sk->sk_destruct;
@@ -1521,6 +1524,11 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
return 0;
err_sock:
+ write_lock_bh(&sk->sk_callback_lock);
+ rcu_assign_sk_user_data(sk, NULL);
+err_inval_sock:
+ write_unlock_bh(&sk->sk_callback_lock);
+
if (tunnel->fd < 0)
sock_release(sock);
else
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 2e66598fac79..e8ebd343e2bf 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -452,6 +452,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
(status->encoding == RX_ENC_HE && streams > 8)))
return 0;
+ if (idx >= MCS_GROUP_RATES)
+ return 0;
+
duration = airtime_mcs_groups[group].duration[idx];
duration <<= airtime_mcs_groups[group].shift;
*overhead = 36 + (streams << 2);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b6dc6e260334..1dbc62537259 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2354,12 +2354,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
goto out;
}
- /* if we are invoked by the msk cleanup code, the subflow is
- * already orphaned
- */
- if (ssk->sk_socket)
- sock_orphan(ssk);
-
+ sock_orphan(ssk);
subflow->disposable = 1;
/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2940,7 +2935,11 @@ cleanup:
if (ssk == msk->first)
subflow->fail_tout = 0;
- sock_orphan(ssk);
+ /* detach from the parent socket, but allow data_ready to
+ * push incoming data into the mptcp stack, to properly ack it
+ */
+ ssk->sk_socket = NULL;
+ ssk->sk_wq = NULL;
unlock_sock_fast(ssk, slow);
}
sock_orphan(sk);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 02a54d59697b..2159b5f9988f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1745,16 +1745,16 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk;
- bool slow, do_cancel_work;
+ bool do_cancel_work;
sock_hold(sk);
- slow = lock_sock_fast_nested(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
next = msk->dl_next;
msk->first = NULL;
msk->dl_next = NULL;
do_cancel_work = __mptcp_close(sk, 0);
- unlock_sock_fast(sk, slow);
+ release_sock(sk);
if (do_cancel_work)
mptcp_cancel_work(sk);
sock_put(sk);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 3adc291d9ce1..7499192af586 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -916,7 +916,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
#ifdef IP_SET_HASH_WITH_MULTI
if (h->bucketsize >= AHASH_MAX_TUNED)
goto set_full;
- else if (h->bucketsize < multi)
+ else if (h->bucketsize <= multi)
h->bucketsize += AHASH_INIT_SIZE;
#endif
if (n->size >= AHASH_MAX(h)) {
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index dd30c03d5a23..75d556d71652 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -151,18 +151,16 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
return -ERANGE;
- if (retried) {
+ if (retried)
ip = ntohl(h->next.ip);
- e.ip = htonl(ip);
- }
for (; ip <= ip_to;) {
+ e.ip = htonl(ip);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ip += hosts;
- e.ip = htonl(ip);
- if (e.ip == 0)
+ if (ip == 0)
return 0;
ret = 0;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f97bda06d2a9..2692139ce417 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1781,7 +1781,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
#ifdef CONFIG_NF_CONNTRACK_MARK
- ct->mark = exp->master->mark;
+ ct->mark = READ_ONCE(exp->master->mark);
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7562b215b932..d71150a40fb0 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -328,9 +328,9 @@ nla_put_failure:
}
#ifdef CONFIG_NF_CONNTRACK_MARK
-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
{
- if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+ if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
goto nla_put_failure;
return 0;
@@ -543,7 +543,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
if (ctnetlink_dump_status(skb, ct) < 0 ||
- ctnetlink_dump_mark(skb, ct) < 0 ||
+ ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
@@ -722,6 +722,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
struct sk_buff *skb;
unsigned int type;
unsigned int flags = 0, group;
+ u32 mark;
int err;
if (events & (1 << IPCT_DESTROY)) {
@@ -826,8 +827,9 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
- if ((events & (1 << IPCT_MARK) || ct->mark)
- && ctnetlink_dump_mark(skb, ct) < 0)
+ mark = READ_ONCE(ct->mark);
+ if ((events & (1 << IPCT_MARK) || mark) &&
+ ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure;
#endif
nlmsg_end(skb, nlh);
@@ -1154,7 +1156,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
- if ((ct->mark & filter->mark.mask) != filter->mark.val)
+ if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
goto ignore_entry;
#endif
status = (u32)READ_ONCE(ct->status);
@@ -2002,9 +2004,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct,
mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
mark = ntohl(nla_get_be32(cda[CTA_MARK]));
- newmark = (ct->mark & mask) ^ mark;
- if (newmark != ct->mark)
- ct->mark = newmark;
+ newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
+ if (newmark != READ_ONCE(ct->mark))
+ WRITE_ONCE(ct->mark, newmark);
}
#endif
@@ -2669,6 +2671,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
+ u32 mark;
zone = nf_ct_zone(ct);
@@ -2730,7 +2733,8 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
- if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
+ mark = READ_ONCE(ct->mark);
+ if (mark && ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure;
#endif
if (ctnetlink_dump_labels(skb, ct) < 0)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 4ffe84c5a82c..bca839ab1ae8 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -366,7 +366,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
- seq_printf(s, "mark=%u ", ct->mark);
+ seq_printf(s, "mark=%u ", READ_ONCE(ct->mark));
#endif
ct_show_secctx(s, ct);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index b04645ced89b..00b522890d77 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -1098,6 +1098,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
struct flow_block_cb *block_cb, *next;
int err = 0;
+ down_write(&flowtable->flow_block_lock);
switch (cmd) {
case FLOW_BLOCK_BIND:
list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
@@ -1112,6 +1113,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
+ up_write(&flowtable->flow_block_lock);
return err;
}
@@ -1168,7 +1170,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
extack);
+ down_write(&flowtable->flow_block_lock);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
+ up_write(&flowtable->flow_block_lock);
if (err < 0)
return err;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e7152d599d73..7a09421f19e1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5958,7 +5958,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
&timeout);
if (err)
return err;
- } else if (set->flags & NFT_SET_TIMEOUT) {
+ } else if (set->flags & NFT_SET_TIMEOUT &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END)) {
timeout = set->timeout;
}
@@ -6024,7 +6025,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
- } else if (set->num_exprs > 0) {
+ } else if (set->num_exprs > 0 &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END)) {
err = nft_set_elem_expr_clone(ctx, set, expr_array);
if (err < 0)
goto err_set_elem_expr_clone;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index a3f01f209a53..641dc21f92b4 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -98,7 +98,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
- *dest = ct->mark;
+ *dest = READ_ONCE(ct->mark);
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
@@ -297,8 +297,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
- if (ct->mark != value) {
- ct->mark = value;
+ if (READ_ONCE(ct->mark) != value) {
+ WRITE_ONCE(ct->mark, value);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index e5ebc0810675..ad3c033db64e 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
u_int32_t new_targetmark;
struct nf_conn *ct;
u_int32_t newmark;
+ u_int32_t oldmark;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
@@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
switch (info->mode) {
case XT_CONNMARK_SET:
- newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
+ oldmark = READ_ONCE(ct->mark);
+ newmark = (oldmark & ~info->ctmask) ^ info->ctmark;
if (info->shift_dir == D_SHIFT_RIGHT)
newmark >>= info->shift_bits;
else
newmark <<= info->shift_bits;
- if (ct->mark != newmark) {
- ct->mark = newmark;
+ if (READ_ONCE(ct->mark) != newmark) {
+ WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
@@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
else
new_targetmark <<= info->shift_bits;
- newmark = (ct->mark & ~info->ctmask) ^
+ newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^
new_targetmark;
- if (ct->mark != newmark) {
- ct->mark = newmark;
+ if (READ_ONCE(ct->mark) != newmark) {
+ WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
case XT_CONNMARK_RESTORE:
- new_targetmark = (ct->mark & info->ctmask);
+ new_targetmark = (READ_ONCE(ct->mark) & info->ctmask);
if (info->shift_dir == D_SHIFT_RIGHT)
new_targetmark >>= info->shift_bits;
else
@@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (ct == NULL)
return false;
- return ((ct->mark & info->mask) == info->mark) ^ info->invert;
+ return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert;
}
static int connmark_mt_check(const struct xt_mtchk_param *par)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 6a193cce2a75..4ffdf2f45c44 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -542,7 +542,7 @@ static int nci_open_device(struct nci_dev *ndev)
skb_queue_purge(&ndev->tx_q);
ndev->ops->close(ndev);
- ndev->flags = 0;
+ ndev->flags &= BIT(NCI_UNREG);
}
done:
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index aa5e712adf07..3d36ea5701f0 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_plen(skb->data));
conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
- if (!conn_info)
+ if (!conn_info) {
+ kfree_skb(skb);
return;
+ }
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c7b10234cf7c..c8eaf4234b2e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -152,7 +152,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
- return ct ? ct->mark : 0;
+ return ct ? READ_ONCE(ct->mark) : 0;
#else
return 0;
#endif
@@ -340,9 +340,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
u32 new_mark;
- new_mark = ct_mark | (ct->mark & ~(mask));
- if (ct->mark != new_mark) {
- ct->mark = new_mark;
+ new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
+ if (READ_ONCE(ct->mark) != new_mark) {
+ WRITE_ONCE(ct->mark, new_mark);
if (nf_ct_is_confirmed(ct))
nf_conntrack_event_cache(IPCT_MARK, ct);
key->ct.mark = new_mark;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6ce8dd19f33c..1ab65f7f2a0a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2293,8 +2293,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
- (skb->ip_summed == CHECKSUM_COMPLETE ||
- skb_csum_unnecessary(skb)))
+ skb_csum_unnecessary(skb))
status |= TP_STATUS_CSUM_VALID;
if (snaplen > res)
@@ -3520,8 +3519,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
- (skb->ip_summed == CHECKSUM_COMPLETE ||
- skb_csum_unnecessary(skb)))
+ skb_csum_unnecessary(skb))
aux.tp_status |= TP_STATUS_CSUM_VALID;
aux.tp_len = origlen;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1ad0ec5afb50..8499ceb7719c 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -399,6 +399,7 @@ enum rxrpc_conn_proto_state {
struct rxrpc_bundle {
struct rxrpc_conn_parameters params;
refcount_t ref;
+ atomic_t active; /* Number of active users */
unsigned int debug_id;
bool try_upgrade; /* True if the bundle is attempting upgrade */
bool alloc_conn; /* True if someone's getting a conn */
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3c9eeb5b750c..bdb335cb2d05 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -40,6 +40,8 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
+static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
+
/*
* Get a connection ID and epoch for a client connection from the global pool.
* The connection struct pointer is then recorded in the idr radix tree. The
@@ -123,6 +125,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
bundle->params = *cp;
rxrpc_get_peer(bundle->params.peer);
refcount_set(&bundle->ref, 1);
+ atomic_set(&bundle->active, 1);
spin_lock_init(&bundle->channel_lock);
INIT_LIST_HEAD(&bundle->waiting_calls);
}
@@ -149,7 +152,7 @@ void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
dead = __refcount_dec_and_test(&bundle->ref, &r);
- _debug("PUT B=%x %d", d, r);
+ _debug("PUT B=%x %d", d, r - 1);
if (dead)
rxrpc_free_bundle(bundle);
}
@@ -338,6 +341,7 @@ found_bundle_free:
rxrpc_free_bundle(candidate);
found_bundle:
rxrpc_get_bundle(bundle);
+ atomic_inc(&bundle->active);
spin_unlock(&local->client_bundles_lock);
_leave(" = %u [found]", bundle->debug_id);
return bundle;
@@ -435,6 +439,7 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
if (old)
trace_rxrpc_client(old, -1, rxrpc_client_replace);
candidate->bundle_shift = shift;
+ atomic_inc(&bundle->active);
bundle->conns[i] = candidate;
for (j = 0; j < RXRPC_MAXCALLS; j++)
set_bit(shift + j, &bundle->avail_chans);
@@ -725,6 +730,7 @@ granted_channel:
smp_rmb();
out_put_bundle:
+ rxrpc_deactivate_bundle(bundle);
rxrpc_put_bundle(bundle);
out:
_leave(" = %d", ret);
@@ -900,9 +906,8 @@ out:
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
struct rxrpc_bundle *bundle = conn->bundle;
- struct rxrpc_local *local = bundle->params.local;
unsigned int bindex;
- bool need_drop = false, need_put = false;
+ bool need_drop = false;
int i;
_enter("C=%x", conn->debug_id);
@@ -921,15 +926,22 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
}
spin_unlock(&bundle->channel_lock);
- /* If there are no more connections, remove the bundle */
- if (!bundle->avail_chans) {
- _debug("maybe unbundle");
- spin_lock(&local->client_bundles_lock);
+ if (need_drop) {
+ rxrpc_deactivate_bundle(bundle);
+ rxrpc_put_connection(conn);
+ }
+}
- for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
- if (bundle->conns[i])
- break;
- if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
+/*
+ * Drop the active count on a bundle.
+ */
+static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
+{
+ struct rxrpc_local *local = bundle->params.local;
+ bool need_put = false;
+
+ if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
+ if (!bundle->params.exclusive) {
_debug("erase bundle");
rb_erase(&bundle->local_node, &local->client_bundles);
need_put = true;
@@ -939,10 +951,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
if (need_put)
rxrpc_put_bundle(bundle);
}
-
- if (need_drop)
- rxrpc_put_connection(conn);
- _leave("");
}
/*
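
Note: the rxrpc changes above add an "active" count alongside the existing refcount and tear the bundle out of the local tree with atomic_dec_and_lock(), so the transition to zero and the rb_erase() happen under the same lock. A generic sketch of that pattern (struct obj, obj_tree and obj_put() are illustrative, not the rxrpc names):

#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct obj {
	atomic_t	active;	/* users that may still look this object up */
	struct rb_node	node;
};

static struct rb_root obj_tree = RB_ROOT;
static DEFINE_SPINLOCK(obj_tree_lock);

void obj_put(struct obj *obj);	/* hypothetical refcount drop */

static void obj_deactivate(struct obj *obj)
{
	/*
	 * atomic_dec_and_lock() returns true with the lock held only when
	 * the count hit zero, so nobody can re-activate the object between
	 * the final decrement and the erase below.
	 */
	if (atomic_dec_and_lock(&obj->active, &obj_tree_lock)) {
		rb_erase(&obj->node, &obj_tree);
		spin_unlock(&obj_tree_lock);
		obj_put(obj);	/* drop the reference the tree held */
	}
}
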
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 1e8ab4749c6c..4662a6ce8a7e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -976,7 +976,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
- depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
+ depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE
help
Say Y here to allow sending the packets to conntrack module.
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 66b143bb04ac..d41002e4613f 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -61,7 +61,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
c = nf_ct_get(skb, &ctinfo);
if (c) {
- skb->mark = c->mark;
+ skb->mark = READ_ONCE(c->mark);
/* using overlimits stats to count how many packets marked */
ca->tcf_qstats.overlimits++;
goto out;
@@ -81,7 +81,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
c = nf_ct_tuplehash_to_ctrack(thash);
/* using overlimits stats to count how many packets marked */
ca->tcf_qstats.overlimits++;
- skb->mark = c->mark;
+ skb->mark = READ_ONCE(c->mark);
nf_ct_put(c);
out:
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b38d91d6b249..4c7f7861ea96 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -178,7 +178,7 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
entry = tcf_ct_flow_table_flow_action_get_next(action);
entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
- entry->ct_metadata.mark = ct->mark;
+ entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
IP_CT_ESTABLISHED_REPLY;
@@ -936,9 +936,9 @@ static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
if (!mask)
return;
- new_mark = mark | (ct->mark & ~(mask));
- if (ct->mark != new_mark) {
- ct->mark = new_mark;
+ new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
+ if (READ_ONCE(ct->mark) != new_mark) {
+ WRITE_ONCE(ct->mark, new_mark);
if (nf_ct_is_confirmed(ct))
nf_conntrack_event_cache(IPCT_MARK, ct);
}
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index d4102f0a9abd..eaa02f098d1c 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -32,7 +32,7 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
{
u8 dscp, newdscp;
- newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
+ newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
~INET_ECN_MASK;
switch (proto) {
@@ -72,7 +72,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
struct sk_buff *skb)
{
ca->stats_cpmark_set++;
- skb->mark = ct->mark & cp->cpmarkmask;
+ skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
}
static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
@@ -130,7 +130,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
}
if (cp->mode & CTINFO_MODE_DSCP)
- if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
+ if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask))
tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
if (cp->mode & CTINFO_MODE_CPMARK)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ef9fceadef8d..ee6514af830f 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -52,6 +52,19 @@ static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
}
}
+static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid)
+{
+ struct sctp_sched_ops *sched;
+
+ if (!SCTP_SO(stream, sid)->ext)
+ return;
+
+ sched = sctp_sched_ops_from_stream(stream);
+ sched->free_sid(stream, sid);
+ kfree(SCTP_SO(stream, sid)->ext);
+ SCTP_SO(stream, sid)->ext = NULL;
+}
+
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -70,16 +83,14 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
* sctp_stream_update will swap ->out pointers.
*/
for (i = 0; i < outcnt; i++) {
- kfree(SCTP_SO(new, i)->ext);
+ sctp_stream_free_ext(new, i);
SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
SCTP_SO(stream, i)->ext = NULL;
}
}
- for (i = outcnt; i < stream->outcnt; i++) {
- kfree(SCTP_SO(stream, i)->ext);
- SCTP_SO(stream, i)->ext = NULL;
- }
+ for (i = outcnt; i < stream->outcnt; i++)
+ sctp_stream_free_ext(stream, i);
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -174,9 +185,9 @@ void sctp_stream_free(struct sctp_stream *stream)
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
int i;
- sched->free(stream);
+ sched->unsched_all(stream);
for (i = 0; i < stream->outcnt; i++)
- kfree(SCTP_SO(stream, i)->ext);
+ sctp_stream_free_ext(stream, i);
genradix_free(&stream->out);
genradix_free(&stream->in);
}
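
Note: sctp_stream_free_ext() above asks the active scheduler to drop its per-stream-id state through the new .free_sid callback before kfree()ing the extension; the fcfs and rr schedulers add empty implementations below, while the prio scheduler uses it to release a shared priority head. A stripped-down sketch of the hook shape, with hypothetical names rather than the real SCTP structures:

#include <linux/slab.h>
#include <linux/types.h>

struct stream;	/* hypothetical stream object */

struct stream_sched_ops {
	int  (*init_sid)(struct stream *s, __u16 sid, gfp_t gfp);
	void (*free_sid)(struct stream *s, __u16 sid);	/* new per-sid hook */
	void (*free)(struct stream *s);
};

/* Generic teardown: scheduler-private state first, then the extension. */
static void stream_free_ext(struct stream *s, __u16 sid,
			    const struct stream_sched_ops *ops, void **ext)
{
	if (!*ext)
		return;

	ops->free_sid(s, sid);
	kfree(*ext);
	*ext = NULL;
}
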
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index 1ad565ed5627..7c8f9d89e16a 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -46,6 +46,10 @@ static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
return 0;
}
+static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
static void sctp_sched_fcfs_free(struct sctp_stream *stream)
{
}
@@ -96,6 +100,7 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
.get = sctp_sched_fcfs_get,
.init = sctp_sched_fcfs_init,
.init_sid = sctp_sched_fcfs_init_sid,
+ .free_sid = sctp_sched_fcfs_free_sid,
.free = sctp_sched_fcfs_free,
.enqueue = sctp_sched_fcfs_enqueue,
.dequeue = sctp_sched_fcfs_dequeue,
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 80b5a2c4cbc7..4fc9f2923ed1 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -204,6 +204,24 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
return sctp_sched_prio_set(stream, sid, 0, gfp);
}
+static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+ struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head;
+ int i;
+
+ if (!prio)
+ return;
+
+ SCTP_SO(stream, sid)->ext->prio_head = NULL;
+ for (i = 0; i < stream->outcnt; i++) {
+ if (SCTP_SO(stream, i)->ext &&
+ SCTP_SO(stream, i)->ext->prio_head == prio)
+ return;
+ }
+
+ kfree(prio);
+}
+
static void sctp_sched_prio_free(struct sctp_stream *stream)
{
struct sctp_stream_priorities *prio, *n;
@@ -323,6 +341,7 @@ static struct sctp_sched_ops sctp_sched_prio = {
.get = sctp_sched_prio_get,
.init = sctp_sched_prio_init,
.init_sid = sctp_sched_prio_init_sid,
+ .free_sid = sctp_sched_prio_free_sid,
.free = sctp_sched_prio_free,
.enqueue = sctp_sched_prio_enqueue,
.dequeue = sctp_sched_prio_dequeue,
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index ff425aed62c7..cc444fe0d67c 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -90,6 +90,10 @@ static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
return 0;
}
+static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
static void sctp_sched_rr_free(struct sctp_stream *stream)
{
sctp_sched_rr_unsched_all(stream);
@@ -177,6 +181,7 @@ static struct sctp_sched_ops sctp_sched_rr = {
.get = sctp_sched_rr_get,
.init = sctp_sched_rr_init,
.init_sid = sctp_sched_rr_init_sid,
+ .free_sid = sctp_sched_rr_free_sid,
.free = sctp_sched_rr_free,
.enqueue = sctp_sched_rr_enqueue,
.dequeue = sctp_sched_rr_dequeue,
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index f09316a9035f..d67440de011e 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1971,6 +1971,9 @@ rcv:
/* Ok, everything's fine, try to synch own keys according to peers' */
tipc_crypto_key_synch(rx, *skb);
+ /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */
+ skb_cb = TIPC_SKB_CB(*skb);
+
/* Mark skb decrypted */
skb_cb->decrypted = 1;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index e8630707901e..e8dcdf267c0c 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -211,7 +211,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
u32 self;
int err;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ return;
+ }
hdr = buf_msg(skb);
if (caps & TIPC_NODE_ID128)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index d92ec92f0b71..e3b427a70398 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con)
conn_put(con);
}
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
{
struct tipc_conn *con;
int ret;
@@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
}
con->conid = ret;
s->idr_in_use++;
- spin_unlock_bh(&s->idr_lock);
set_bit(CF_CONNECTED, &con->flags);
con->server = s;
+ con->sock = sock;
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -467,7 +469,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
return;
- con = tipc_conn_alloc(srv);
+ con = tipc_conn_alloc(srv, newsock);
if (IS_ERR(con)) {
ret = PTR_ERR(con);
sock_release(newsock);
@@ -479,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
newsk->sk_data_ready = tipc_conn_data_ready;
newsk->sk_write_space = tipc_conn_write_space;
newsk->sk_user_data = con;
- con->sock = newsock;
write_unlock_bh(&newsk->sk_callback_lock);
/* Wake up receive process in case of 'SYN+' message */
newsk->sk_data_ready(newsk);
+ conn_put(con);
}
}
@@ -577,17 +579,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
sub.filter = filter;
*(u64 *)&sub.usr_handle = (u64)port;
- con = tipc_conn_alloc(tipc_topsrv(net));
+ con = tipc_conn_alloc(tipc_topsrv(net), NULL);
if (IS_ERR(con))
return false;
*conid = con->conid;
- con->sock = NULL;
rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
- if (rc >= 0)
- return true;
+ if (rc)
+ conn_put(con);
+
conn_put(con);
- return false;
+ return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index cdb391a8754b..7fbb1d0b69b3 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -346,7 +346,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
salt = tls_ctx->crypto_send.aes_gcm_256.salt;
break;
default:
- return NULL;
+ goto free_req;
}
cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
@@ -492,7 +492,8 @@ int tls_sw_fallback_init(struct sock *sk,
key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto free_aead;
}
cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index da752b0cc752..3d86482e83f5 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -330,7 +330,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
* determine if they are the same ie.
*/
if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
- if (!memcmp(tmp_old + 2, tmp + 2, 5)) {
+ if (tmp_old[1] >= 5 && tmp[1] >= 5 &&
+ !memcmp(tmp_old + 2, tmp + 2, 5)) {
/* same vendor ie, copy from
* subelement
*/
@@ -2526,10 +2527,15 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
const struct cfg80211_bss_ies *ies1, *ies2;
size_t ielen = len - offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
- struct cfg80211_non_tx_bss non_tx_data;
+ struct cfg80211_non_tx_bss non_tx_data = {};
res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
len, gfp);
+
+ /* don't do any further MBSSID handling for S1G */
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return res;
+
if (!res || !wiphy->support_mbssid ||
!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return res;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 5259ef8f5242..748d8630ab58 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -117,7 +117,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
if (!pskb_may_pull(skb, 1)) {
x25_neigh_put(nb);
- return 0;
+ goto drop;
}
switch (skb->data[0]) {
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 5f5aafd418af..21269e8f2db4 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
}
}
+static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ __u32 seq = xo->seq.low;
+
+ seq += skb_shinfo(skb)->gso_segs;
+ if (unlikely(seq < xo->seq.low))
+ return true;
+
+ return false;
+}
+
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
int err;
@@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
- if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+ if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+ unlikely(xmit_xfrm_check_overflow(skb)))) {
struct sk_buff *segs;
/* Packet got rerouted, fixup features and segment it. */
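
Note: xmit_xfrm_check_overflow() above predicts whether adding the GSO segment count to the 32-bit output sequence number would wrap, and if so forces the skb through software segmentation. The wrap test is the standard unsigned-overflow check, shown here in isolation as a sketch:

#include <linux/types.h>

/* True iff a + b wraps past U32_MAX: unsigned addition is modular, so the
 * result is smaller than either operand exactly when the sum wrapped. */
static inline bool u32_add_wraps(u32 a, u32 b)
{
	return (u32)(a + b) < a;
}
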
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 9f4d42eb090f..ce56d659c55a 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -714,7 +714,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
oseq += skb_shinfo(skb)->gso_segs;
}
- if (unlikely(oseq < replay_esn->oseq)) {
+ if (unlikely(xo->seq.low < replay_esn->oseq)) {
XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
xo->seq.hi = oseq_hi;
replay_esn->oseq_hi = oseq_hi;
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 8bbcced67c22..2a90139ecbe1 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -30,8 +30,8 @@ KBUILD_PKG_ROOTCMD ?="fakeroot -u"
export KDEB_SOURCENAME
# Include only those top-level files that are needed by make, plus the GPL copy
TAR_CONTENT := Documentation LICENSES arch block certs crypto drivers fs \
- include init io_uring ipc kernel lib mm net samples scripts \
- security sound tools usr virt \
+ include init io_uring ipc kernel lib mm net rust \
+ samples scripts security sound tools usr virt \
.config .scmversion Makefile \
Kbuild Kconfig COPYING $(wildcard localversion*)
MKSPEC := $(srctree)/scripts/package/mkspec
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 5514c23f45c2..0e73aca4f908 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -74,7 +74,8 @@ command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed"
find_dir_prefix() {
local objfile=$1
- local start_kernel_addr=$(${READELF} --symbols --wide $objfile | ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
+ local start_kernel_addr=$(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' |
+ ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
[[ -z $start_kernel_addr ]] && return
local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
@@ -178,7 +179,7 @@ __faddr2line() {
found=2
break
fi
- done < <(${READELF} --symbols --wide $objfile | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
+ done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
if [[ $found = 0 ]]; then
warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size"
@@ -259,7 +260,7 @@ __faddr2line() {
DONE=1
- done < <(${READELF} --symbols --wide $objfile | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn')
+ done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn')
}
[[ $# -lt 2 ]] && usage
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 60a2a63a5e90..a3ac5a716e9f 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -90,7 +90,7 @@ if [ -n "$KDEB_PKGVERSION" ]; then
packageversion=$KDEB_PKGVERSION
revision=${packageversion##*-}
else
- revision=$(cat .version 2>/dev/null||echo 1)
+ revision=$($srctree/init/build-version)
packageversion=$version-$revision
fi
sourcename=$KDEB_SOURCENAME
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index b7aee23fc387..47ef6bc30c0e 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -113,15 +113,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
* expand the variable length event to linear buffer space.
*/
-static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
+static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
+ char **bufptr = ptr;
+
memcpy(*bufptr, src, size);
*bufptr += size;
return 0;
}
-static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
+static int seq_copy_in_user(void *ptr, void *src, int size)
{
+ char __user **bufptr = ptr;
+
if (copy_to_user(*bufptr, src, size))
return -EFAULT;
*bufptr += size;
@@ -151,8 +155,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
return newlen;
}
err = snd_seq_dump_var_event(event,
- in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
- (snd_seq_dump_func_t)seq_copy_in_user,
+ in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
&buf);
return err < 0 ? err : newlen;
}
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index f99e00083141..4c677c8546c7 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -59,7 +59,7 @@ int snd_dice_stream_get_rate_mode(struct snd_dice *dice, unsigned int rate,
static int select_clock(struct snd_dice *dice, unsigned int rate)
{
- __be32 reg;
+ __be32 reg, new;
u32 data;
int i;
int err;
@@ -83,15 +83,17 @@ static int select_clock(struct snd_dice *dice, unsigned int rate)
if (completion_done(&dice->clock_accepted))
reinit_completion(&dice->clock_accepted);
- reg = cpu_to_be32(data);
+ new = cpu_to_be32(data);
err = snd_dice_transaction_write_global(dice, GLOBAL_CLOCK_SELECT,
- &reg, sizeof(reg));
+ &new, sizeof(new));
if (err < 0)
return err;
if (wait_for_completion_timeout(&dice->clock_accepted,
- msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0)
- return -ETIMEDOUT;
+ msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0) {
+ if (reg != new)
+ return -ETIMEDOUT;
+ }
return 0;
}
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index b9eb3208f288..ae31bb127594 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -321,6 +321,11 @@ static const struct config_entry config_table[] = {
}
},
{
+ .flags = FLAG_SOF,
+ .device = 0x34c8,
+ .codec_hid = &essx_83x6,
+ },
+ {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x34c8,
},
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e18499dd14f0..e5c036385666 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9436,6 +9436,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 6c0f1de10429..d9715bea965e 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ }
+ },
{}
};
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 51721edd8f53..e88d9ff95cdf 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -143,7 +143,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
0, 0xA0, 96, adc_att_tlv),
SOC_DOUBLE_R_SX_TLV("PGA Volume",
CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL,
- 0, 0x19, 30, pga_tlv),
+ 0, 0x1A, 30, pga_tlv),
SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0),
SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0),
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index fc19c34ca00e..b65560981abb 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -14,7 +14,7 @@ enum {
HDAC_HDMI_1_DAI_ID,
HDAC_HDMI_2_DAI_ID,
HDAC_HDMI_3_DAI_ID,
- HDAC_LAST_DAI_ID = HDAC_HDMI_3_DAI_ID,
+ HDAC_DAI_ID_NUM
};
struct hdac_hda_pcm {
@@ -24,7 +24,7 @@ struct hdac_hda_pcm {
struct hdac_hda_priv {
struct hda_codec *codec;
- struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+ struct hdac_hda_pcm pcm[HDAC_DAI_ID_NUM];
bool need_display_power;
};
diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
index 3e04c7f0cce4..ec0905df65d1 100644
--- a/sound/soc/codecs/max98373-i2c.c
+++ b/sound/soc/codecs/max98373-i2c.c
@@ -549,6 +549,10 @@ static int max98373_i2c_probe(struct i2c_client *i2c)
max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
sizeof(*max98373->cache),
GFP_KERNEL);
+ if (!max98373->cache) {
+ ret = -ENOMEM;
+ return ret;
+ }
for (i = 0; i < max98373->cache_num; i++)
max98373->cache[i].reg = max98373_i2c_cache_reg[i];
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 1a25a3787935..362663abcb89 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -298,13 +298,14 @@ static int rt5514_spi_pcm_new(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver rt5514_spi_component = {
- .name = DRV_NAME,
- .probe = rt5514_spi_pcm_probe,
- .open = rt5514_spi_pcm_open,
- .hw_params = rt5514_spi_hw_params,
- .hw_free = rt5514_spi_hw_free,
- .pointer = rt5514_spi_pcm_pointer,
- .pcm_construct = rt5514_spi_pcm_new,
+ .name = DRV_NAME,
+ .probe = rt5514_spi_pcm_probe,
+ .open = rt5514_spi_pcm_open,
+ .hw_params = rt5514_spi_hw_params,
+ .hw_free = rt5514_spi_hw_free,
+ .pointer = rt5514_spi_pcm_pointer,
+ .pcm_construct = rt5514_spi_pcm_new,
+ .legacy_dai_naming = 1,
};
/**
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 8f3993a4c1cc..d25703dd7499 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -396,15 +396,16 @@ static int rt5677_spi_pcm_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver rt5677_spi_dai_component = {
- .name = DRV_NAME,
- .probe = rt5677_spi_pcm_probe,
- .open = rt5677_spi_pcm_open,
- .close = rt5677_spi_pcm_close,
- .hw_params = rt5677_spi_hw_params,
- .hw_free = rt5677_spi_hw_free,
- .prepare = rt5677_spi_prepare,
- .pointer = rt5677_spi_pcm_pointer,
- .pcm_construct = rt5677_spi_pcm_new,
+ .name = DRV_NAME,
+ .probe = rt5677_spi_pcm_probe,
+ .open = rt5677_spi_pcm_open,
+ .close = rt5677_spi_pcm_close,
+ .hw_params = rt5677_spi_hw_params,
+ .hw_free = rt5677_spi_hw_free,
+ .prepare = rt5677_spi_prepare,
+ .pointer = rt5677_spi_pcm_pointer,
+ .pcm_construct = rt5677_spi_pcm_new,
+ .legacy_dai_naming = 1,
};
/* Select a suitable transfer command for the next transfer to ensure
diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
index 4120842fe699..88a8392a58ed 100644
--- a/sound/soc/codecs/rt711-sdca-sdw.c
+++ b/sound/soc/codecs/rt711-sdca-sdw.c
@@ -230,7 +230,7 @@ static int rt711_sdca_read_prop(struct sdw_slave *slave)
}
/* set the timeout values */
- prop->clk_stop_timeout = 20;
+ prop->clk_stop_timeout = 700;
/* wake-up event */
prop->wake_capable = 1;
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 4b2135eba74d..a916f4619ea3 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1794,6 +1794,7 @@ static void sgtl5000_i2c_remove(struct i2c_client *client)
{
struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
+ regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT);
regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index 51b87a936179..2e0ed3e68fa5 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -438,20 +438,13 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
if (tx_mask == 0 || rx_mask != 0)
return -EINVAL;
- if (slots == 1) {
- if (tx_mask != 1)
- return -EINVAL;
- left_slot = 0;
- right_slot = 0;
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
} else {
- left_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << left_slot);
- if (tx_mask == 0) {
- right_slot = left_slot;
- } else {
- right_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << right_slot);
- }
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
}
if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index b6765235a4b3..8557759acb1f 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -395,21 +395,13 @@ static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai,
if (tx_mask == 0 || rx_mask != 0)
return -EINVAL;
- if (slots == 1) {
- if (tx_mask != 1)
- return -EINVAL;
-
- left_slot = 0;
- right_slot = 0;
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
} else {
- left_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << left_slot);
- if (tx_mask == 0) {
- right_slot = left_slot;
- } else {
- right_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << right_slot);
- }
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
}
if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
index a6db6f0e5431..afdf0c863aa1 100644
--- a/sound/soc/codecs/tas2780.c
+++ b/sound/soc/codecs/tas2780.c
@@ -380,20 +380,13 @@ static int tas2780_set_dai_tdm_slot(struct snd_soc_dai *dai,
if (tx_mask == 0 || rx_mask != 0)
return -EINVAL;
- if (slots == 1) {
- if (tx_mask != 1)
- return -EINVAL;
- left_slot = 0;
- right_slot = 0;
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
} else {
- left_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << left_slot);
- if (tx_mask == 0) {
- right_slot = left_slot;
- } else {
- right_slot = __ffs(tx_mask);
- tx_mask &= ~(1 << right_slot);
- }
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
}
if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
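
Note: the tas2764, tas2770 and tas2780 hunks above all delete the special-cased slots == 1 branch, because when only one bit is set in tx_mask the generic path already maps both channels to that slot. A standalone sketch of the parsing logic (the helper name is illustrative):

#include <linux/bitops.h>
#include <linux/errno.h>

static int parse_stereo_slots(unsigned long tx_mask,
			      unsigned int *left_slot, unsigned int *right_slot)
{
	if (!tx_mask)
		return -EINVAL;

	*left_slot = __ffs(tx_mask);		/* lowest requested slot */
	tx_mask &= ~BIT(*left_slot);

	if (!tx_mask) {				/* mono: mirror the left slot */
		*right_slot = *left_slot;
		return 0;
	}

	*right_slot = __ffs(tx_mask);
	tx_mask &= ~BIT(*right_slot);

	return tx_mask ? -EINVAL : 0;		/* at most two slots allowed */
}
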
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index a969547708d4..52bb55724724 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -14,6 +14,7 @@
#include <dt-bindings/sound/tlv320adc3xxx.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
@@ -1025,7 +1026,9 @@ static const struct gpio_chip adc3xxx_gpio_chip = {
static void adc3xxx_free_gpio(struct adc3xxx *adc3xxx)
{
+#ifdef CONFIG_GPIOLIB
gpiochip_remove(&adc3xxx->gpio_chip);
+#endif
}
static void adc3xxx_init_gpio(struct adc3xxx *adc3xxx)
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b4b4355c6728..b901e4c65e8a 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2503,6 +2503,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component)
snd_soc_component_update_bits(component, WM8962_CLOCKING2,
WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+ /* The DSPCLK_DIV field in the WM8962_CLOCKING1 register is used to
+ * generate the correct LRCLK and BCLK frequencies. Sometimes this
+ * read-only value is not updated promptly after SYSCLK is enabled,
+ * which leads to wrong divider calculations. Delay here so the
+ * register has time to report its newest value; testing shows the
+ * delay needs to be at least 500~1000us.
+ */

+ usleep_range(500, 1000);
dspclk = snd_soc_component_read(component, WM8962_CLOCKING1);
if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON)
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 936aef5d2767..e16e7b3fa96c 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1232,7 +1232,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
}
ret = pm_runtime_put_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOSYS)
goto err_pm_get_sync;
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_asrc_component,
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 5c21fc490fce..17fefd27ec90 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -1069,7 +1069,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
ret = pm_runtime_put_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOSYS)
goto err_pm_get_sync;
/*
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 79ef4e269bc9..4b86ef82fd93 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -194,6 +194,25 @@ static int fsl_micfil_reset(struct device *dev)
if (ret)
return ret;
+ /*
+ * SRES is a self-clearing bit, but REG_MICFIL_CTRL1 is defined as a
+ * non-volatile register, so SRES stays set in the regmap cache after
+ * it is written, and every later update of REG_MICFIL_CTRL1 would
+ * trigger another software reset. Clear the cached bit explicitly.
+ */
+ ret = regmap_clear_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_SRES);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting SRES should clear the CHnF flags, but even with an added
+ * delay they are sometimes left set, so clear CHnF explicitly.
+ */
+ ret = regmap_write_bits(micfil->regmap, REG_MICFIL_STAT, 0xFF, 0xFF);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 81f89f6767a2..e60c7b344562 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1446,7 +1446,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
}
ret = pm_runtime_put_sync(dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOSYS)
goto err_pm_get_sync;
/*
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 6432b83f616f..a935c5fd9edb 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -443,6 +443,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
| BYT_CHT_ES8316_INTMIC_IN2_MAP
| BYT_CHT_ES8316_JD_INVERTED),
},
+ { /* Nanote UMPC-01 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+ },
+ .driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP,
+ },
{ /* Teclast X98 Plus II */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index fbb42e54947a..70713e4b07dc 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -63,6 +63,7 @@ struct sof_es8336_private {
struct snd_soc_jack jack;
struct list_head hdmi_pcm_list;
bool speaker_en;
+ struct delayed_work pcm_pop_work;
};
struct sof_hdmi_pcm {
@@ -111,6 +112,46 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk headset at mic1 port enabled\n");
}
+static void pcm_pop_work_events(struct work_struct *work)
+{
+ struct sof_es8336_private *priv =
+ container_of(work, struct sof_es8336_private, pcm_pop_work.work);
+
+ gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+ if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+ gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
+
+}
+
+static int sof_8336_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_card *card = rtd->card;
+ struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ if (priv->speaker_en == false)
+ if (substream->stream == 0) {
+ cancel_delayed_work(&priv->pcm_pop_work);
+ gpiod_set_value_cansleep(priv->gpio_speakers, true);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -122,19 +163,7 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
- if (SND_SOC_DAPM_EVENT_ON(event))
- msleep(70);
-
- gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
-
- if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
- return 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event))
- msleep(70);
-
- gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
-
+ queue_delayed_work(system_wq, &priv->pcm_pop_work, msecs_to_jiffies(70));
return 0;
}
@@ -344,6 +373,7 @@ static int sof_es8336_hw_params(struct snd_pcm_substream *substream,
/* machine stream operations */
static struct snd_soc_ops sof_es8336_ops = {
.hw_params = sof_es8336_hw_params,
+ .trigger = sof_8336_trigger,
};
static struct snd_soc_dai_link_component platform_component[] = {
@@ -723,7 +753,8 @@ static int sof_es8336_probe(struct platform_device *pdev)
}
INIT_LIST_HEAD(&priv->hdmi_pcm_list);
-
+ INIT_DELAYED_WORK(&priv->pcm_pop_work,
+ pcm_pop_work_events);
snd_soc_card_set_drvdata(card, priv);
if (mach->mach_params.dmic_num > 0) {
@@ -752,6 +783,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+ cancel_delayed_work(&priv->pcm_pop_work);
gpiod_put(priv->gpio_speakers);
device_remove_software_node(priv->codec_dev);
put_device(priv->codec_dev);
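
Note: sof_es8336 now defers the speaker/headphone GPIO flip to a delayed work item queued 70 ms after the DAPM event, and the new PCM trigger callback cancels that work when the stream stops. The skeleton of that defer-and-cancel arrangement, with illustrative names and a hypothetical amp_apply_gpio() helper:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

void amp_apply_gpio(bool on);		/* hypothetical GPIO helper */

struct amp_ctl {
	struct delayed_work work;	/* INIT_DELAYED_WORK(&work, amp_work_fn) at probe */
	bool enable;			/* state requested by the DAPM event */
};

static void amp_work_fn(struct work_struct *work)
{
	struct amp_ctl *amp = container_of(work, struct amp_ctl, work.work);

	amp_apply_gpio(amp->enable);
}

/* DAPM event path: remember the new state, apply it 70 ms later. */
static void amp_request(struct amp_ctl *amp, bool enable)
{
	amp->enable = enable;
	queue_delayed_work(system_wq, &amp->work, msecs_to_jiffies(70));
}

/* Stream-stop path: a pending flip is no longer wanted, drop it. */
static void amp_stream_stopped(struct amp_ctl *amp)
{
	cancel_delayed_work(&amp->work);
}
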
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index b032bc07de8b..d0062f2cd256 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -10,6 +10,11 @@
#include <sound/soc-acpi-intel-match.h>
#include "../skylake/skl.h"
+static const struct snd_soc_acpi_codecs essx_83x6 = {
+ .num_codecs = 3,
+ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+};
+
static struct skl_machine_pdata icl_pdata = {
.use_tplg_pcm = true,
};
@@ -27,6 +32,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
.drv_name = "sof_rt5682",
.sof_tplg_filename = "sof-icl-rt5682.tplg",
},
+ {
+ .comp_ids = &essx_83x6,
+ .drv_name = "sof-essx8336",
+ .sof_tplg_filename = "sof-icl-es8336", /* the tplg suffix is added at run time */
+ .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+ SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+ SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_icl_machines);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 12a82f5a3ff6..a409fbed8f34 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3477,10 +3477,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_cpus);
static int __init snd_soc_init(void)
{
+ int ret;
+
snd_soc_debugfs_init();
- snd_soc_util_init();
+ ret = snd_soc_util_init();
+ if (ret)
+ goto err_util_init;
- return platform_driver_register(&soc_driver);
+ ret = platform_driver_register(&soc_driver);
+ if (ret)
+ goto err_register;
+ return 0;
+
+err_register:
+ snd_soc_util_exit();
+err_util_init:
+ snd_soc_debugfs_exit();
+ return ret;
}
module_init(snd_soc_init);
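
Note: snd_soc_init() above now unwinds the steps that already succeeded when a later one fails, in reverse order, instead of ignoring the return values. A compact sketch of that goto-unwind shape, with hypothetical my_* names:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Hypothetical subsystem pieces. */
void my_debugfs_init(void);
void my_debugfs_exit(void);
int my_util_init(void);
void my_util_exit(void);
static struct platform_driver my_driver;

static int __init my_subsys_init(void)
{
	int ret;

	my_debugfs_init();			/* cannot fail */

	ret = my_util_init();
	if (ret)
		goto err_util;

	ret = platform_driver_register(&my_driver);
	if (ret)
		goto err_register;

	return 0;

err_register:
	my_util_exit();
err_util:
	my_debugfs_exit();
	return ret;
}
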
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d515e7a78ea8..879cf1be67a9 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3645,7 +3645,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
switch (w->id) {
case snd_soc_dapm_regulator_supply:
- w->regulator = devm_regulator_get(dapm->dev, w->name);
+ w->regulator = devm_regulator_get(dapm->dev, widget->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
goto request_failed;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index bd88de056358..55b009d3c681 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -452,7 +452,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
val = ucontrol->value.integer.value[0];
if (mc->platform_max && val > mc->platform_max)
return -EINVAL;
- if (val > max - min)
+ if (val > max)
return -EINVAL;
val_mask = mask << shift;
val = (val + min) & mask;
@@ -464,10 +464,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
- unsigned int val2;
+ unsigned int val2 = ucontrol->value.integer.value[1];
+
+ if (mc->platform_max && val2 > mc->platform_max)
+ return -EINVAL;
+ if (val2 > max)
+ return -EINVAL;
val_mask = mask << rshift;
- val2 = (ucontrol->value.integer.value[1] + min) & mask;
+ val2 = (val2 + min) & mask;
val2 = val2 << rshift;
err = snd_soc_component_update_bits(component, reg2, val_mask,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index fb87d6d23408..35a16c3f9591 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -822,11 +822,6 @@ static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_startup(dai, substream);
if (ret < 0)
goto err;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- dai->tx_mask = 0;
- else
- dai->rx_mask = 0;
}
/* Dynamic PCM DAI links compat checks use dynamic capabilities */
@@ -1252,6 +1247,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
return;
be_substream = snd_soc_dpcm_get_substream(be, stream);
+ if (!be_substream)
+ return;
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index a3b6df2378b4..a4dba0b751e7 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -264,7 +264,7 @@ int __init snd_soc_util_init(void)
return ret;
}
-void __exit snd_soc_util_exit(void)
+void snd_soc_util_exit(void)
{
platform_driver_unregister(&soc_dummy_driver);
platform_device_unregister(soc_dummy_dev);
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index c148715aa0f9..0720e1eae084 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -2275,6 +2275,7 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
struct snd_sof_widget *swidget;
struct snd_sof_route *sroute;
+ bool dyn_widgets = false;
int ret;
/*
@@ -2284,12 +2285,14 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
* topology loading the sound card unavailable to open PCMs.
*/
list_for_each_entry(swidget, &sdev->widget_list, list) {
- if (swidget->dynamic_pipeline_widget)
+ if (swidget->dynamic_pipeline_widget) {
+ dyn_widgets = true;
continue;
+ }
- /* Do not free widgets for static pipelines with FW ABI older than 3.19 */
+ /* Do not free widgets for static pipelines with FW older than SOF2.2 */
if (!verify && !swidget->dynamic_pipeline_widget &&
- v->abi_version < SOF_ABI_VER(3, 19, 0)) {
+ SOF_FW_VER(v->major, v->minor, v->micro) < SOF_FW_VER(2, 2, 0)) {
swidget->use_count = 0;
swidget->complete = 0;
continue;
@@ -2303,9 +2306,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
/*
* Tear down all pipelines associated with PCMs that did not get suspended
* and unset the prepare flag so that they can be set up again during resume.
- * Skip this step for older firmware.
+ * Skip this step for older firmware unless the topology has any
+ * dynamic pipelines (in which case the step is mandatory).
*/
- if (!verify && v->abi_version >= SOF_ABI_VER(3, 19, 0)) {
+ if (!verify && (dyn_widgets || SOF_FW_VER(v->major, v->minor, v->micro) >=
+ SOF_FW_VER(2, 2, 0))) {
ret = sof_tear_down_left_over_pipelines(sdev);
if (ret < 0) {
dev_err(sdev->dev, "failed to tear down paused pipelines\n");
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 38855dd60617..6a0e7f3b5023 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -1344,16 +1344,6 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
break;
}
- if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
- swidget->core = SOF_DSP_PRIMARY_CORE;
- } else {
- int core = sof_get_token_value(SOF_TKN_COMP_CORE_ID, swidget->tuples,
- swidget->num_tuples);
-
- if (core >= 0)
- swidget->core = core;
- }
-
/* check token parsing reply */
if (ret < 0) {
dev_err(scomp->dev,
@@ -1365,6 +1355,16 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
return ret;
}
+ if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
+ swidget->core = SOF_DSP_PRIMARY_CORE;
+ } else {
+ int core = sof_get_token_value(SOF_TKN_COMP_CORE_ID, swidget->tuples,
+ swidget->num_tuples);
+
+ if (core >= 0)
+ swidget->core = core;
+ }
+
/* bind widget to external event */
if (tw->event_type) {
if (widget_ops[w->id].bind_event) {
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 643fc8a17018..837c1848d9bf 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -304,6 +304,11 @@ static int stm32_adfsdm_dummy_cb(const void *data, void *private)
return 0;
}
+static void stm32_adfsdm_cleanup(void *data)
+{
+ iio_channel_release_all_cb(data);
+}
+
static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
.open = stm32_adfsdm_pcm_open,
.close = stm32_adfsdm_pcm_close,
@@ -350,6 +355,12 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
if (IS_ERR(priv->iio_cb))
return PTR_ERR(priv->iio_cb);
+ ret = devm_add_action_or_reset(&pdev->dev, stm32_adfsdm_cleanup, priv->iio_cb);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to add action\n");
+ return ret;
+ }
+
component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
if (!component)
return -ENOMEM;
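
Note: the stm32_adfsdm probe above ties the IIO callback buffer to a devm action, so it is released automatically if a later probe step fails and again at unbind. The general pattern, with a hypothetical acquire/release pair standing in for any resource without a devm_* variant:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct my_resource;						/* hypothetical */
struct my_resource *my_resource_acquire(struct device *dev);	/* hypothetical */
void my_resource_release(struct my_resource *res);		/* hypothetical */

static void my_resource_cleanup(void *data)
{
	my_resource_release(data);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_resource *res;
	int ret;

	res = my_resource_acquire(&pdev->dev);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* Releases @res right away on failure; otherwise the cleanup runs
	 * automatically when the device is unbound. */
	ret = devm_add_action_or_reset(&pdev->dev, my_resource_cleanup, res);
	if (ret)
		return ret;

	/* ... rest of probe ... */
	return 0;
}
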
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index ce7f6942308f..f3dd9f8e621c 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -1077,7 +1077,7 @@ static int stm32_i2s_parse_dt(struct platform_device *pdev,
if (irq < 0)
return irq;
- ret = devm_request_irq(&pdev->dev, irq, stm32_i2s_isr, IRQF_ONESHOT,
+ ret = devm_request_irq(&pdev->dev, irq, stm32_i2s_isr, 0,
dev_name(&pdev->dev), i2s);
if (ret) {
dev_err(&pdev->dev, "irq request returned %d\n", ret);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index bbff0923d264..2839f6b6f09b 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1133,10 +1133,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
port = &umidi->endpoints[i].out->ports[j];
break;
}
- if (!port) {
- snd_BUG();
+ if (!port)
return -ENXIO;
- }
substream->runtime->private_data = port;
port->state = STATE_UNKNOWN;
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 10ac52705892..f17ade084720 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -535,6 +535,11 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+
+#define MSR_AMD64_DE_CFG 0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -640,9 +645,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 2491c54a5e4f..f8deae4e26a1 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -715,12 +715,12 @@ int main(int argc, char **argv)
continue;
}
- toread = buf_len;
} else {
usleep(timedelay);
- toread = 64;
}
+ toread = buf_len;
+
read_size = read(buf_fd, data, toread * scan_size);
if (read_size < 0) {
if (errno == EAGAIN) {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 184ce1684dcd..91b7106a4a73 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -11169,7 +11169,7 @@ static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf
}
*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
- return libbpf_get_error(link);
+ return libbpf_get_error(*link);
}
/* Common logic for all BPF program types that attach to a btf_id */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index f3a8e8e74eb8..d504d96adc83 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -234,7 +234,7 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_USER_RINGBUF:
key_size = 0;
value_size = 0;
- max_entries = 4096;
+ max_entries = sysconf(_SC_PAGE_SIZE);
break;
case BPF_MAP_TYPE_STRUCT_OPS:
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index d285171d4b69..6af142953a94 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -77,6 +77,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
__u32 len = sizeof(info);
struct epoll_event *e;
struct ring *r;
+ __u64 mmap_sz;
void *tmp;
int err;
@@ -115,8 +116,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
r->mask = info.max_entries - 1;
/* Map writable consumer page */
- tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- map_fd, 0);
+ tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
if (tmp == MAP_FAILED) {
err = -errno;
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
@@ -129,8 +129,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
* data size to allow simple reading of samples that wrap around the
* end of a ring buffer. See kernel implementation for details.
* */
- tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
- MAP_SHARED, map_fd, rb->page_size);
+ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+ if (mmap_sz != (__u64)(size_t)mmap_sz) {
+ pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
+ return libbpf_err(-E2BIG);
+ }
+ tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
ringbuf_unmap_ring(rb, r);
@@ -348,6 +352,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
{
struct bpf_map_info info;
__u32 len = sizeof(info);
+ __u64 mmap_sz;
void *tmp;
struct epoll_event *rb_epoll;
int err;
@@ -384,8 +389,13 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
* simple reading and writing of samples that wrap around the end of
* the buffer. See the kernel implementation for details.
*/
- tmp = mmap(NULL, rb->page_size + 2 * info.max_entries,
- PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, rb->page_size);
+ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+ if (mmap_sz != (__u64)(size_t)mmap_sz) {
+ pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries);
+ return -E2BIG;
+ }
+ tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
@@ -476,6 +486,10 @@ void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
__u64 cons_pos, prod_pos;
struct ringbuf_hdr *hdr;
+ /* The top two bits are used as special flags */
+ if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT))
+ return errno = E2BIG, NULL;
+
/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in
* the kernel.
*/
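
Note: both ringbuf mapping paths above now compute the double-mapped length as a 64-bit value and refuse it with -E2BIG if it does not fit in size_t, instead of letting it truncate silently on 32-bit builds before reaching mmap(). The check in isolation, as a plain userspace C sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True iff len survives the round trip through size_t, i.e. mmap() would
 * be asked for the intended length even where size_t is 32 bits wide. */
static bool fits_in_size_t(uint64_t len)
{
	return len == (uint64_t)(size_t)len;
}
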
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
index 099eb4dfd4f7..18405c3b7cee 100644
--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -458,7 +458,7 @@ static void test_sk_storage_map_basic(void)
struct {
int cnt;
int lock;
- } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
+ } value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
struct bpf_map_create_opts bad_xattr;
int btf_fd, map_fd, sk_fd, err;
@@ -483,38 +483,41 @@ static void test_sk_storage_map_basic(void)
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_EXIST | BPF_F_LOCK);
CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Update with BPF_NOEXIST */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_NOEXIST | BPF_F_LOCK);
CHECK(!err || errno != EEXIST,
@@ -526,22 +529,23 @@ static void test_sk_storage_map_basic(void)
value.cnt -= 1;
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt again and update with map_flags == 0 */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Test delete elem */
err = bpf_map_delete_elem(map_fd, &sk_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index d457a55ff408..a4b4133d39e9 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -358,10 +358,12 @@ static int get_syms(char ***symsp, size_t *cntp)
* We attach to almost all kernel functions and some of them
* will cause 'suspicious RCU usage' when fprobe is attached
* to them. Filter out the current culprits - arch_cpu_idle
- * and rcu_* functions.
+ * default_idle and rcu_* functions.
*/
if (!strcmp(name, "arch_cpu_idle"))
continue;
+ if (!strcmp(name, "default_idle"))
+ continue;
if (!strncmp(name, "rcu_", 4))
continue;
if (!strcmp(name, "bpf_dispatcher_xdp_func"))
@@ -400,7 +402,7 @@ error:
return err;
}
-static void test_bench_attach(void)
+void serial_test_kprobe_multi_bench_attach(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi_empty *skel = NULL;
@@ -468,6 +470,4 @@ void test_kprobe_multi_test(void)
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
- if (test__start_subtest("bench_attach"))
- test_bench_attach();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c
index dd324b4933db..4d7056f8f177 100644
--- a/tools/testing/selftests/bpf/prog_tests/varlen.c
+++ b/tools/testing/selftests/bpf/prog_tests/varlen.c
@@ -63,6 +63,13 @@ void test_varlen(void)
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
+
+ CHECK_VAL(bss->ret_bad_read, -EFAULT);
+ CHECK_VAL(data->payload_bad[0], 0x42);
+ CHECK_VAL(data->payload_bad[1], 0x42);
+ CHECK_VAL(data->payload_bad[2], 0);
+ CHECK_VAL(data->payload_bad[3], 0x42);
+ CHECK_VAL(data->payload_bad[4], 0x42);
cleanup:
test_varlen__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c
index 3987ff174f1f..20eb7d422c41 100644
--- a/tools/testing/selftests/bpf/progs/test_varlen.c
+++ b/tools/testing/selftests/bpf/progs/test_varlen.c
@@ -19,6 +19,7 @@ __u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
+__u64 ret_bad_read = 0;
/* .data */
int payload2_len1 = -1;
@@ -36,6 +37,8 @@ int payload4_len2 = -1;
int total4= -1;
char payload4[MAX_LEN + MAX_LEN] = { 1 };
+char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
+
SEC("raw_tp/sys_enter")
int handler64_unsigned(void *regs)
{
@@ -61,6 +64,8 @@ int handler64_unsigned(void *regs)
total1 = payload - (void *)payload1;
+ ret_bad_read = bpf_probe_read_kernel_str(payload_bad + 2, 1, (void *) -1);
+
return 0;
}
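Taken together, the two hunks above document the error-path contract of bpf_probe_read_kernel_str(): when the source address faults, the helper returns -EFAULT and zeroes exactly the destination window it was given (a single byte here), leaving the neighbouring bytes untouched. A standalone BPF-side sketch of that contract, using hypothetical names (scratch, bad_read_ret) and assuming only the behaviour the new checks assert:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* 0x42 guard bytes around the one-byte destination window. */
char scratch[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
long bad_read_ret;

SEC("raw_tp/sys_enter")
int probe_bad_read(void *ctx)
{
	/* Read one byte from an unmapped kernel address into scratch[2].
	 * Expected per the checks above: the call returns -EFAULT,
	 * scratch[2] is zeroed, and the guard bytes stay 0x42.
	 */
	bad_read_ret = bpf_probe_read_kernel_str(&scratch[2], 1, (void *)-1);
	return 0;
}

char _license[] SEC("license") = "GPL";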
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0e9a47f97890..3fef451d8831 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1010,7 +1010,7 @@ static inline const char *str_msg(const struct msg *msg, char *buf)
msg->subtest_done.have_log);
break;
case MSG_TEST_LOG:
- sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)",
+ sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
strlen(msg->test_log.log_buf),
msg->test_log.is_last);
break;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2dbcbf363c18..b605a70d4f6b 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1260,7 +1260,7 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
bzero(&info, sizeof(info));
info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)*buf;
+ info.xlated_prog_insns = (__u64)(unsigned long)*buf;
if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("second bpf_obj_get_info_by_fd failed");
goto out_free_buf;
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 2f0d705db9db..05d980fb083d 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -41,6 +41,7 @@
/x86_64/svm_vmcall_test
/x86_64/svm_int_ctl_test
/x86_64/svm_nested_soft_inject_test
+/x86_64/svm_nested_shutdown_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/tsc_scaling_sync
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0172eb6cb6ee..4a2caef2c939 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -101,6 +101,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index e8ca0d8a6a7e..5da0c5e2a7af 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -748,6 +748,19 @@ struct ex_regs {
uint64_t rflags;
};
+struct idt_entry {
+ uint16_t offset0;
+ uint16_t selector;
+ uint16_t ist : 3;
+ uint16_t : 5;
+ uint16_t type : 4;
+ uint16_t : 1;
+ uint16_t dpl : 2;
+ uint16_t p : 1;
+ uint16_t offset1;
+ uint32_t offset2; uint32_t reserved;
+};
+
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 39c4409ef56a..41c1c73c464d 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1074,19 +1074,6 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
}
}
-struct idt_entry {
- uint16_t offset0;
- uint16_t selector;
- uint16_t ist : 3;
- uint16_t : 5;
- uint16_t type : 4;
- uint16_t : 1;
- uint16_t dpl : 2;
- uint16_t p : 1;
- uint16_t offset1;
- uint32_t offset2; uint32_t reserved;
-};
-
static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
int dpl, unsigned short selector)
{
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
new file mode 100644
index 000000000000..e73fcdef47bb
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_nested_shutdown_test
+ *
+ * Copyright (C) 2022, Red Hat, Inc.
+ *
+ * Nested SVM testing: test that unintercepted shutdown in L2 doesn't crash the host
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+ __asm__ __volatile__("ud2");
+}
+
+static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+ idt[6].p = 0; // #UD is intercepted but its injection will cause #NP
+ idt[11].p = 0; // #NP is not intercepted and will cause another
+ // #NP that will be converted to #DF
+ idt[8].p = 0; // #DF will cause #NP which will cause SHUTDOWN
+
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ /* should not reach here */
+ GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ vm_vaddr_t svm_gva;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vcpu_alloc_svm(vm, &svm_gva);
+
+ vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
+ run = vcpu->run;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
index 70b44f0b52fe..ead5d878a71c 100644
--- a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
+++ b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
@@ -3,6 +3,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
+#include "svm_util.h"
#include <string.h>
#include <sys/ioctl.h>
@@ -20,10 +21,11 @@ static void l2_guest_code(void)
: : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}
-void l1_guest_code(struct vmx_pages *vmx)
-{
#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+void l1_guest_code_vmx(struct vmx_pages *vmx)
+{
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
@@ -38,24 +40,53 @@ void l1_guest_code(struct vmx_pages *vmx)
GUEST_DONE();
}
+void l1_guest_code_svm(struct svm_test_data *svm)
+{
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	/* don't intercept shutdown, to exercise the case where SVM allows it to go unintercepted */
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ /* should not reach here, L1 should crash */
+ GUEST_ASSERT(0);
+}
+
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vcpu_events events;
- vm_vaddr_t vmx_pages_gva;
struct ucall uc;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
+ bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);
+
+ TEST_REQUIRE(has_vmx || has_svm);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
- vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
- vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
+ if (has_vmx) {
+ vm_vaddr_t vmx_pages_gva;
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ } else {
+ vm_vaddr_t svm_gva;
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
+ vcpu_alloc_svm(vm, &svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
+ }
+
+ vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
run = vcpu->run;
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -78,13 +109,21 @@ int main(void)
"No triple fault pending");
vcpu_run(vcpu);
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_DONE:
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- default:
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
- }
+ if (has_svm) {
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ } else {
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+ }
+ return 0;
}
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 31c3b6ebd388..21ca91473c09 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -4196,10 +4196,13 @@ elif [ "$TESTS" = "ipv6" ]; then
TESTS="$TESTS_IPV6"
fi
-which nettest >/dev/null
-if [ $? -ne 0 ]; then
- echo "'nettest' command not found; skipping tests"
- exit $ksft_skip
+# nettest can be run from PATH or from the same directory as this selftest
+if ! which nettest >/dev/null; then
+ PATH=$PWD:$PATH
+ if ! which nettest >/dev/null; then
+ echo "'nettest' command not found; skipping tests"
+ exit $ksft_skip
+ fi
fi
declare -i nfail=0
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index ee5e98204d3d..a47b26ab48f2 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -1228,6 +1228,17 @@ ipv4_fcnal()
run_cmd "$IP ro add 172.16.101.0/24 nhid 21"
run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1"
log_test $? 2 "Delete multipath route with only nh id based entry"
+
+ run_cmd "$IP nexthop add id 22 via 172.16.1.6 dev veth1"
+ run_cmd "$IP ro add 172.16.102.0/24 nhid 22"
+ run_cmd "$IP ro del 172.16.102.0/24 dev veth1"
+ log_test $? 2 "Delete route when specifying only nexthop device"
+
+ run_cmd "$IP ro del 172.16.102.0/24 via 172.16.1.6"
+ log_test $? 2 "Delete route when specifying only gateway"
+
+ run_cmd "$IP ro del 172.16.102.0/24"
+ log_test $? 0 "Delete route when not specifying nexthop attributes"
}
ipv4_grp_fcnal()
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index f3dd5f2a0272..2eeaf4aca644 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2152,7 +2152,7 @@ remove_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 slow
+ run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 speed_10
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2
@@ -2165,7 +2165,7 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert
@@ -2178,7 +2178,7 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 0879da915014..80d36f7cfee8 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -35,8 +35,9 @@ init()
ns1="ns1-$rndh"
ns2="ns2-$rndh"
+ ns_sbox="ns_sbox-$rndh"
- for netns in "$ns1" "$ns2";do
+ for netns in "$ns1" "$ns2" "$ns_sbox";do
ip netns add $netns || exit $ksft_skip
ip -net $netns link set lo up
ip netns exec $netns sysctl -q net.mptcp.enabled=1
@@ -73,7 +74,7 @@ init()
cleanup()
{
- for netns in "$ns1" "$ns2"; do
+ for netns in "$ns1" "$ns2" "$ns_sbox"; do
ip netns del $netns
done
rm -f "$cin" "$cout"
@@ -243,7 +244,7 @@ do_mptcp_sockopt_tests()
{
local lret=0
- ./mptcp_sockopt
+ ip netns exec "$ns_sbox" ./mptcp_sockopt
lret=$?
if [ $lret -ne 0 ]; then
@@ -252,7 +253,7 @@ do_mptcp_sockopt_tests()
return
fi
- ./mptcp_sockopt -6
+ ip netns exec "$ns_sbox" ./mptcp_sockopt -6
lret=$?
if [ $lret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index ffa13a957a36..40aeb5a71a2a 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -247,9 +247,10 @@ run_test()
tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
- # time is measured in ms, account for transfer size, affegated link speed
+ # time is measured in ms, account for transfer size, aggregated link speed
# and header overhead (10%)
- local time=$((size * 8 * 1000 * 10 / (( $rate1 + $rate2) * 1024 *1024 * 9) ))
+ # ms byte -> bit 10% mbit -> kbit -> bit 10%
+ local time=$((1000 * size * 8 * 10 / ((rate1 + rate2) * 1000 * 1000 * 9) ))
# mptcp_connect will do some sleeps to allow the mp_join handshake
# completion (see mptcp_connect): 200ms on each side, add some slack
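As a quick sanity check of the corrected timeout expression above, the same integer arithmetic can be run with made-up numbers (the transfer size and link rates below are illustrative, not values simult_flows.sh uses): 1,125,000 bytes over an aggregated 10 mbit/s path, with the 10% header overhead, should yield a budget of 1000 ms.

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs only; not taken from the selftest. */
	long long size = 1125000;	/* transfer size in bytes   */
	long long rate1 = 4, rate2 = 6;	/* per-link rates in mbit/s */

	/* Mirrors the shell formula: bytes -> bits, +10% header overhead,
	 * divided by the aggregated rate converted to bit/s, in ms.
	 */
	long long time_ms = 1000 * size * 8 * 10 /
			    ((rate1 + rate2) * 1000 * 1000 * 9);

	printf("transfer time budget: %lld ms\n", time_ms);	/* prints 1000 */
	return 0;
}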
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 736e358dc549..dfe3d287f01d 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -686,10 +686,12 @@ setup_xfrm() {
}
setup_nettest_xfrm() {
- which nettest >/dev/null
- if [ $? -ne 0 ]; then
- echo "'nettest' command not found; skipping tests"
- return 1
+ if ! which nettest >/dev/null; then
+ PATH=$PWD:$PATH
+ if ! which nettest >/dev/null; then
+ echo "'nettest' command not found; skipping tests"
+ return 1
+ fi
fi
[ ${1} -eq 6 ] && proto="-6" || proto=""
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index 6a443ca3cd3a..0c743752669a 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -5,6 +5,8 @@
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
# set global exit status, but never reset nonzero one.
check_err()
{
@@ -34,7 +36,7 @@ cfg_veth() {
ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
ip -netns "${PEER_NS}" link set dev veth1 up
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+ ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
}
run_one() {
@@ -195,8 +197,8 @@ run_all() {
return $ret
}
-if [ ! -f ../bpf/xdp_dummy.o ]; then
- echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+ echo "Missing ${BPF_FILE}. Build bpf selftest first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index 8a1109a545db..894972877e8b 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -5,6 +5,8 @@
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
cleanup() {
local -r jobs="$(jobs -p)"
local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -34,7 +36,7 @@ run_one() {
ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
ip -netns "${PEER_NS}" link set dev veth1 up
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+ ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
@@ -80,8 +82,8 @@ run_all() {
run_udp "${ipv6_args}"
}
-if [ ! -f ../bpf/xdp_dummy.o ]; then
- echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+ echo "Missing ${BPF_FILE}. Build bpf selftest first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index 7fe85ba51075..c9c4b9d65839 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -5,6 +5,8 @@
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
cleanup() {
local -r jobs="$(jobs -p)"
local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -36,7 +38,7 @@ run_one() {
ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+ ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
tc -n "${PEER_NS}" qdisc add dev veth1 clsact
tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6 direct-action
tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
@@ -81,8 +83,8 @@ run_all() {
run_udp "${ipv6_args}"
}
-if [ ! -f ../bpf/xdp_dummy.o ]; then
- echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+ echo "Missing ${BPF_FILE}. Build bpf selftest first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 1bcd82e1f662..c079565add39 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -1,6 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
readonly BASE="ns-$(mktemp -u XXXXXX)"
readonly SRC=2
readonly DST=1
@@ -46,7 +47,7 @@ create_ns() {
ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
done
- ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
+ ip -n $NS_DST link set veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null
}
create_vxlan_endpoint() {
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 430895d1a2b6..2d073595c620 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -1,6 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
readonly BASE=`basename $STATS`
readonly SRC=2
@@ -216,8 +217,8 @@ while getopts "hs:" option; do
esac
done
-if [ ! -f ../bpf/xdp_dummy.o ]; then
- echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+ echo "Missing ${BPF_FILE}. Build bpf selftest first"
exit 1
fi
@@ -288,14 +289,14 @@ if [ $CPUS -gt 1 ]; then
ip netns exec $NS_DST ethtool -L veth$DST rx 1 tx 2 2>/dev/null
ip netns exec $NS_SRC ethtool -L veth$SRC rx 1 tx 2 2>/dev/null
printf "%-60s" "bad setting: XDP with RX nr less than TX"
- ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
+ ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} \
section xdp 2>/dev/null &&\
echo "fail - set operation successful ?!?" || echo " ok "
# the following tests will run with multiple channels active
ip netns exec $NS_SRC ethtool -L veth$SRC rx 2
ip netns exec $NS_DST ethtool -L veth$DST rx 2
- ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
+ ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} \
section xdp 2>/dev/null
printf "%-60s" "bad setting: reducing RX nr below peer TX with XDP set"
ip netns exec $NS_DST ethtool -L veth$DST rx 1 2>/dev/null &&\
@@ -311,7 +312,7 @@ if [ $CPUS -gt 2 ]; then
chk_channels "setting invalid channels nr" $DST 2 2
fi
-ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
+ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null
chk_gro_flag "with xdp attached - gro flag" $DST on
chk_gro_flag " - peer gro flag" $SRC off
chk_tso_flag " - tso flag" $SRC off
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 26e193ffd2a2..873a892147e5 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -150,7 +150,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-loss"
`cat "$in" | grep -A "$lines" 'Slabs sorted by loss' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4+$2*$3" "$4}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
@@ -159,7 +159,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-size"
`cat "$in" | grep -A "$lines" 'Slabs sorted by size' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4" "$4-$2*$3}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25d7872b29c1..fab4d3790578 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1198,8 +1198,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
goto out_err_no_arch_destroy_vm;
}
- kvm->max_halt_poll_ns = halt_poll_ns;
-
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_arch_destroy_vm;
@@ -3377,9 +3375,6 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
if (val < grow_start)
val = grow_start;
- if (val > vcpu->kvm->max_halt_poll_ns)
- val = vcpu->kvm->max_halt_poll_ns;
-
vcpu->halt_poll_ns = val;
out:
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
@@ -3483,6 +3478,24 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
}
}
+static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ if (kvm->override_halt_poll_ns) {
+ /*
+ * Ensure kvm->max_halt_poll_ns is not read before
+ * kvm->override_halt_poll_ns.
+ *
+ * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
+ */
+ smp_rmb();
+ return READ_ONCE(kvm->max_halt_poll_ns);
+ }
+
+ return READ_ONCE(halt_poll_ns);
+}
+
/*
* Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
* polling is enabled, busy wait for a short time before blocking to avoid the
@@ -3491,12 +3504,18 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
*/
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
+ unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
- bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
ktime_t start, cur, poll_end;
bool waited = false;
+ bool do_halt_poll;
u64 halt_ns;
+ if (vcpu->halt_poll_ns > max_halt_poll_ns)
+ vcpu->halt_poll_ns = max_halt_poll_ns;
+
+ do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
+
start = cur = poll_end = ktime_get();
if (do_halt_poll) {
ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
@@ -3535,18 +3554,21 @@ out:
update_halt_poll_stats(vcpu, start, poll_end, !waited);
if (halt_poll_allowed) {
+ /* Recompute the max halt poll time in case it changed. */
+ max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
+
if (!vcpu_valid_wakeup(vcpu)) {
shrink_halt_poll_ns(vcpu);
- } else if (vcpu->kvm->max_halt_poll_ns) {
+ } else if (max_halt_poll_ns) {
if (halt_ns <= vcpu->halt_poll_ns)
;
/* we had a long block, shrink polling */
else if (vcpu->halt_poll_ns &&
- halt_ns > vcpu->kvm->max_halt_poll_ns)
+ halt_ns > max_halt_poll_ns)
shrink_halt_poll_ns(vcpu);
/* we had a short halt and our poll time is too small */
- else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
- halt_ns < vcpu->kvm->max_halt_poll_ns)
+ else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
+ halt_ns < max_halt_poll_ns)
grow_halt_poll_ns(vcpu);
} else {
vcpu->halt_poll_ns = 0;
@@ -4581,6 +4603,16 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
return -EINVAL;
kvm->max_halt_poll_ns = cap->args[0];
+
+ /*
+ * Ensure kvm->override_halt_poll_ns does not become visible
+ * before kvm->max_halt_poll_ns.
+ *
+ * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
+ */
+ smp_wmb();
+ kvm->override_halt_poll_ns = true;
+
return 0;
}
case KVM_CAP_DIRTY_LOG_RING:
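The barrier comments in the two hunks above describe a standard publish/consume pairing: the writer stores the new max_halt_poll_ns before setting override_halt_poll_ns, and the reader checks the flag before loading the value, so a reader that observes the flag is guaranteed to see the value published with it. A stripped-down kernel-style sketch of that pairing follows; the symbols payload, published, publish() and consume() are hypothetical illustrations, not KVM code.

#include <linux/types.h>	/* bool */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */

static int payload;
static bool published;

/* Writer: make the payload visible before the flag that advertises it. */
static void publish(int val)
{
	payload = val;
	smp_wmb();			/* order payload before the flag */
	WRITE_ONCE(published, true);
}

/* Reader: only trust the payload after observing the flag. */
static int consume(int fallback)
{
	if (READ_ONCE(published)) {
		smp_rmb();		/* pairs with the writer's smp_wmb() */
		return READ_ONCE(payload);
	}
	return fallback;		/* flag not yet visible */
}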
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 346e47f15572..7c248193ca26 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -297,7 +297,12 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
if (!gpc->valid || old_uhva != gpc->uhva) {
ret = hva_to_pfn_retry(kvm, gpc);
} else {
- /* If the HVA→PFN mapping was already valid, don't unmap it. */
+ /*
+ * If the HVA→PFN mapping was already valid, don't unmap it.
+ * But do update gpc->khva because the offset within the page
+ * may have changed.
+ */
+ gpc->khva = old_khva + page_offset;
old_pfn = KVM_PFN_ERR_FAULT;
old_khva = NULL;
ret = 0;